pax_global_header00006660000000000000000000000064145555214240014521gustar00rootroot0000000000000052 comment=b3457a5648263c46dcd2c62672f79d9cc73735b5 uqfoundation-multiprocess-b3457a5/000077500000000000000000000000001455552142400173205ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/.codecov.yml000066400000000000000000000032051455552142400215430ustar00rootroot00000000000000comment: false coverage: status: project: default: # Commits pushed to master should not make the overall # project coverage decrease by more than 1%: target: auto threshold: 1% patch: default: # Be tolerant on slight code coverage diff on PRs to limit # noisy red coverage status on github PRs. # Note The coverage stats are still uploaded # to codecov so that PR reviewers can see uncovered lines # in the github diff if they install the codecov browser # extension: # https://github.com/codecov/browser-extension target: auto threshold: 1% fixes: # reduces pip-installed path to git root and # remove dist-name from setup-installed path - "*/python3.8/site-packages/::py3.8/" - "*/python3.9/site-packages/::py3.9/" - "*/python3.10/site-packages/::py3.10/" - "*/python3.11/site-packages/::py3.11/" - "*/python3.12/site-packages/::py3.12/" - "*/python3.13/site-packages/::py3.13/" - "*/pypy3.8-*/site-packages/::pypy3.8/" - "*/pypy3.9-*/site-packages/::pypy3.9/" - "*/pypy3.10-*/site-packages/::pypy3.10/" - "*/python3.8/site-packages/multiprocess-*::py3.8/" - "*/python3.9/site-packages/multiprocess-*::py3.9/" - "*/python3.10/site-packages/multiprocess-*::py3.10/" - "*/python3.11/site-packages/multiprocess-*::py3.11/" - "*/python3.12/site-packages/multiprocess-*::py3.12/" - "*/python3.13/site-packages/multiprocess-*::py3.13/" - "*/pypy3.8-*/site-packages/multiprocess-*::pypy3.8/" - "*/pypy3.9-*/site-packages/multiprocess-*::pypy3.9/" - "*/pypy3.10-*/site-packages/multiprocess-*::pypy3.10/" uqfoundation-multiprocess-b3457a5/.coveragerc000066400000000000000000000014421455552142400214420ustar00rootroot00000000000000[run] # source = multiprocess include = */multiprocess/* */multiprocess/dummy/* omit = */_multiprocess/* */examples/* */multiprocess/tests/* branch = true # timid = true # parallel = true # concurrency = multiprocessing # thread # data_file = $TRAVIS_BUILD_DIR/.coverage # data_file = ./.coverage # debug = trace [paths] source = multiprocess multiprocess/dummy */site-packages/multiprocess */site-packages/multiprocess/dummy */site-packages/multiprocess-*/multiprocess */site-packages/multiprocess-*/multiprocess/dummy [report] include = */multiprocess/* */multiprocess/dummy/* exclude_lines = pragma: no cover raise NotImplementedError if __name__ == .__main__.: # show_missing = true ignore_errors = true # pragma: no branch # noqa uqfoundation-multiprocess-b3457a5/.gitignore000066400000000000000000000000401455552142400213020ustar00rootroot00000000000000.tox/ .cache/ *.egg-info/ *.pyc uqfoundation-multiprocess-b3457a5/.readthedocs.yml000066400000000000000000000005151455552142400224070ustar00rootroot00000000000000# readthedocs configuration file # see https://docs.readthedocs.io/en/stable/config-file/v2.html version: 2 # configure sphinx: configuration: docs/source/conf.py # build build: os: ubuntu-22.04 tools: python: "3.10" # install python: install: - method: pip path: . 
- requirements: docs/requirements.txt uqfoundation-multiprocess-b3457a5/.travis.yml000066400000000000000000000054761455552142400214450ustar00rootroot00000000000000dist: jammy language: python matrix: include: - python: '3.8' env: - COVERAGE="true" - python: '3.9' env: - COVERAGE="true" - DILL="true" - python: '3.10' env: - python: '3.11' env: - python: '3.12' env: - python: '3.13-dev' env: - PYVERSION="3.13" - DILL="master" - python: 'pypy3.8-7.3.9' # is 7.3.11 env: - PYPY_VERSION="3.8" - python: 'pypy3.9-7.3.9' # is 7.3.15 env: - PYPY_VERSION="3.9" - python: 'pypy3.10-7.3.15' env: - PYPY_VERSION="3.10" allow_failures: - python: '3.13-dev' - python: 'pypy3.10-7.3.15' # CI missing fast_finish: true cache: pip: true before_install: - set -e # fail on any error - if [[ $COVERAGE == "true" ]]; then pip install coverage; fi - if [[ $DILL == "true" ]]; then pip install dill; fi - if [[ $DILL == "master" ]]; then pip install "https://github.com/uqfoundation/dill/archive/master.tar.gz"; fi install: - python -m pip install . script: - if [[ $PYVERSION != "3.13" ]]; then PYVERSION=$TRAVIS_PYTHON_VERSION ; fi - if [[ $PYVERSION == "pypy3.8-7.3.9" ]]; then PYVERSION=py$PYPY_VERSION ; fi - if [[ $PYVERSION == "pypy3.9-7.3.9" ]]; then PYVERSION=py$PYPY_VERSION ; fi - if [[ $PYVERSION == "pypy3.10-7.3.15" ]]; then PYVERSION=py$PYPY_VERSION ; fi - cd py$PYVERSION #XXX: bash script may require tests run from root - if [[ $COVERAGE == "true" ]]; then cp ../.coveragerc .coveragerc ; fi - for test in multiprocess/tests/__init__.py; do echo $test ; if [[ $COVERAGE == "true" ]]; then coverage run -a $test > /dev/null; else python $test > /dev/null; fi ; done - if [[ $PYVERSION == *"py3"* ]]; then for test in multiprocess/tests/test_multiprocessing_fork.py; do echo $test ; if [[ $COVERAGE == "true" ]]; then coverage run -a $test > /dev/null; else python $test > /dev/null; fi ; done ; elif [[ $PYVERSION == "3.11" || $PYVERSION == "3.12" || $PYVERSION == "3.13" ]]; then for test in multiprocess/tests/*.py multiprocess/tests/test_*/*.py; do if [[ $test != *"__"* && $test != *"mp_"* ]]; then echo $test ; if [[ $COVERAGE == "true" ]]; then coverage run -a $test > /dev/null; else python $test > /dev/null; fi ; fi ; done ; else for test in multiprocess/tests/*.py; do if [[ $test != *"__"* && $test != *"mp_"* ]]; then echo $test ; if [[ $COVERAGE == "true" ]]; then coverage run -a $test > /dev/null; else python $test > /dev/null; fi ; fi; done ; fi - cd .. after_success: - cd py$PYVERSION - if [[ $COVERAGE == "true" ]]; then bash <(curl -s https://codecov.io/bash); else echo ''; fi - if [[ $COVERAGE == "true" ]]; then coverage report; fi - cd .. uqfoundation-multiprocess-b3457a5/COPYING000066400000000000000000000027251455552142400203610ustar00rootroot00000000000000Copyright (c) 2006-2008, R Oudkerk All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. uqfoundation-multiprocess-b3457a5/LICENSE000066400000000000000000000036121455552142400203270ustar00rootroot00000000000000Copyright (c) 2008-2016 California Institute of Technology. Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. All rights reserved. This software forks the python package "multiprocessing". Licence and copyright information for multiprocessing can be found in "COPYING". This software is available subject to the conditions and terms laid out below. By downloading and using this software you are agreeing to the following conditions. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the names of the copyright holders nor the names of any of the contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
uqfoundation-multiprocess-b3457a5/MANIFEST.in000066400000000000000000000011071455552142400210550ustar00rootroot00000000000000include COPYING include LICENSE include README* include MANIFEST.in include pyproject.toml include tox.ini include version.py recursive-include py3.10 * recursive-include py3.11 * recursive-include py3.12 * recursive-include py3.13 * recursive-include py3.8 * recursive-include py3.9 * recursive-include pypy3.10 * recursive-include pypy3.8 * recursive-include pypy3.9 * include .* prune .git prune .coverage prune .eggs prune py2.5 prune py2.6 prune py2.7 prune py3.1 prune py3.2 prune py3.3 prune py3.4 prune py3.5 prune py3.6 prune py3.7 prune pypy2.7 prune pypy3.6 prune pypy3.7 uqfoundation-multiprocess-b3457a5/README.md000066400000000000000000000150151455552142400206010ustar00rootroot00000000000000multiprocess ============ better multiprocessing and multithreading in Python About Multiprocess ------------------ ``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. ``multiprocess`` leverages ``multiprocessing`` to support the spawning of processes using the API of the Python standard library's ``threading`` module. ``multiprocessing`` has been distributed as part of the standard library since Python 2.6. ``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing. ``multiprocess`` is in active development, so any user feedback, bug reports, comments, or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query. Major Features -------------- ``multiprocess`` enables: * objects to be transferred between processes using pipes or multi-producer/multi-consumer queues * objects to be shared between processes using a server process or (for simple data) shared memory ``multiprocess`` provides: * equivalents of all the synchronization primitives in ``threading`` * a ``Pool`` class to facilitate submitting tasks to worker processes * enhanced serialization, using ``dill`` Current Release [![Downloads](https://static.pepy.tech/personalized-badge/multiprocess?period=total&units=international_system&left_color=grey&right_color=blue&left_text=pypi%20downloads)](https://pepy.tech/project/multiprocess) [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/multiprocess?color=blue&label=conda%20downloads)](https://anaconda.org/conda-forge/multiprocess) [![Stack Overflow](https://img.shields.io/badge/stackoverflow-get%20help-black.svg)](https://stackoverflow.com/questions/tagged/multiprocess) --------------- The latest released version of ``multiprocess`` is available from: https://pypi.org/project/multiprocess ``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``. 
Development Version
[![Support](https://img.shields.io/badge/support-the%20UQ%20Foundation-purple.svg?style=flat&colorA=grey&colorB=purple)](http://www.uqfoundation.org/pages/donate.html)
[![Documentation Status](https://readthedocs.org/projects/multiprocess/badge/?version=latest)](https://multiprocess.readthedocs.io/en/latest/?badge=latest)
[![Build Status](https://travis-ci.com/uqfoundation/multiprocess.svg?label=build&logo=travis&branch=master)](https://travis-ci.com/github/uqfoundation/multiprocess)
[![codecov](https://codecov.io/gh/uqfoundation/multiprocess/branch/master/graph/badge.svg)](https://codecov.io/gh/uqfoundation/multiprocess)
-------------------
You can get the latest development version with all the shiny new features at:
    https://github.com/uqfoundation

If you have a new contribution, please submit a pull request.

Installation
------------
``multiprocess`` can be installed with ``pip``::

    $ pip install multiprocess

For Python 2, a C compiler is required to build the included extension module from source. Python 3 and binary installs do not require a C compiler.

Requirements
------------
``multiprocess`` requires:

* ``python`` (or ``pypy``), **>=3.8**
* ``setuptools``, **>=42**
* ``dill``, **>=0.3.8**

Basic Usage
-----------
The ``multiprocess.Process`` class follows the API of ``threading.Thread``. For example ::

    from multiprocess import Process, Queue

    def f(q):
        q.put('hello world')

    if __name__ == '__main__':
        q = Queue()
        p = Process(target=f, args=[q])
        p.start()
        print (q.get())
        p.join()

Synchronization primitives like locks, semaphores and conditions are available, for example ::

    >>> from multiprocess import Condition
    >>> c = Condition()
    >>> print (c)
    <Condition(<RLock(None, 0)>, 0)>
    >>> c.acquire()
    True
    >>> print (c)
    <Condition(<RLock(MainProcess, 1)>, 0)>

One can also use a manager to create shared objects either in shared memory or in a server process, for example ::

    >>> from multiprocess import Manager
    >>> manager = Manager()
    >>> l = manager.list(range(10))
    >>> l.reverse()
    >>> print (l)
    [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    >>> print (repr(l))
    <ListProxy object, typeid 'list' at 0x...>

Tasks can be offloaded to a pool of worker processes in various ways, for example ::

    >>> from multiprocess import Pool
    >>> def f(x): return x*x
    ...
    >>> p = Pool(4)
    >>> result = p.map_async(f, range(10))
    >>> print (result.get(timeout=1))
    [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]

When ``dill`` is installed, serialization is extended to most objects, for example ::

    >>> from multiprocess import Pool
    >>> p = Pool(4)
    >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, range(10)))
    [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]

More Information
----------------
Probably the best way to get started is to look at the documentation at http://multiprocess.rtfd.io. Also see ``multiprocess.tests`` for scripts that demonstrate how ``multiprocess`` can be used to leverage multiple processes to execute Python in parallel. You can run the test suite with ``python -m multiprocess.tests``. As ``multiprocess`` conforms to the ``multiprocessing`` interface, the examples and documentation found at http://docs.python.org/library/multiprocessing.html also apply to ``multiprocess`` if one will ``import multiprocessing as multiprocess``. See https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples for a set of examples that demonstrate some basic use cases and benchmarking for running Python code in parallel. Please feel free to submit a ticket on github, or ask a question on stackoverflow (**@Mike McKerns**).
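As noted above, ``multiprocess`` conforms to the ``multiprocessing`` interface, so it can be used as a drop-in replacement for the standard-library module. A minimal sketch of that pattern, assuming only the ``Pool`` API shown earlier, is to alias the import so that code written against the standard-library name runs unchanged ::

    # reuse code written for the standard library by aliasing the import
    import multiprocess as multiprocessing

    def square(x):
        return x * x

    if __name__ == '__main__':
        # the Pool API matches the standard library's multiprocessing.Pool
        with multiprocessing.Pool(4) as pool:
            print(pool.map(square, range(10)))  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]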
If you would like to share how you use ``multiprocess`` in your work, please send an email (to **mmckerns at uqfoundation dot org**). Citation -------- If you use ``multiprocess`` to do research that leads to publication, we ask that you acknowledge use of ``multiprocess`` by citing the following in your publication:: M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis, "Building a framework for predictive science", Proceedings of the 10th Python in Science Conference, 2011; http://arxiv.org/pdf/1202.1056 Michael McKerns and Michael Aivazis, "pathos: a framework for heterogeneous computing", 2010- ; https://uqfoundation.github.io/project/pathos Please see https://uqfoundation.github.io/project/pathos or http://arxiv.org/pdf/1202.1056 for further information. uqfoundation-multiprocess-b3457a5/_misc/000077500000000000000000000000001455552142400204125ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/_misc/doc/000077500000000000000000000000001455552142400211575ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/_misc/doc/CHANGES.txt000066400000000000000000000502551455552142400227770ustar00rootroot00000000000000.. default-role:: literal ========================== Changelog for processing ========================== Changes in 0.52 --------------- * On versions 0.50 and 0.51 Mac OSX `Lock.release()` would fail with `OSError(errno.ENOSYS, "[Errno 78] Function not implemented")`. This appears to be because on Mac OSX `sem_getvalue()` has not been implemented. Now `sem_getvalue()` is no longer needed. Unfortunately, however, on Mac OSX `BoundedSemaphore()` will not raise `ValueError` if it exceeds its initial value. * Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs. * The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias. Changes in 0.51 --------------- * In 0.50 `processing.Value()` and `processing.sharedctypes.Value()` were related but had different signatures, which was rather confusing. Now `processing.sharedctypes.Value()` has been renamed `processing.sharedctypes.RawValue()` and `processing.sharedctypes.Value()` is the same as `processing.Value()`. * In version 0.50 `sendfd()` and `recvfd()` apparently did not work on 64bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40. However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of `sendfd()` and `recvfd()` by setting `macros['HAVE_FD_TRANSFRER'] = 0` in `setup.py`. * Fixed an authentication error when using a "remote" manager created using `BaseManager.from_address()`. * Fixed a couple of bugs which only affected Python 2.4. Changes in 0.50 --------------- * `ctypes` is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately. * `LocalManager()` has been removed. * Added `processing.Value()` and `processing.Array()` which are similar to `LocalManager.SharedValue()` and `LocalManager.SharedArray()`. * In the `sharedctypes` module `new_value()` and `new_array()` have been renamed `Value()` and `Array()`. * `Process.stop()`, `Process.getStoppable()` and `Process.setStoppable()` have been removed. Use `Process.terminate()` instead. 
* `procesing.Lock` now matches `threading.Lock` behaviour more closely: now a thread can release a lock it does not own, and now when a thread tries acquiring a lock it already owns a deadlock results instead of an exception. * On Windows when the main thread is blocking on a method of `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition` it will no longer ignore Ctrl-C. (The same was already true on Unix.) This differs from the behaviour of the equivalent objects in `threading` which will completely ignore Ctrl-C. * The `test` sub-package has been replaced by lots of unit tests in a `tests` sub-package. Some of the old test files have been moved over to a new `examples` sub-package. * On Windows it is now possible for a non-console python program (i.e. one using `pythonw.exe` instead of `python.exe`) to use `processing`. Previously an exception was raised when `subprocess.py` tried to duplicate stdin, stdout, stderr. * Proxy objects should now be thread safe -- they now use thread local storage. * Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise `RuntimeError` with a message saying that the object should only be shared between processes using inheritance. Previously, this worked unreliably on Windows but would fail with an unexplained `AssertionError` on Unix. * The names of some of the macros used for compiling the extension have changed. See `INSTALL.txt` and `setup.py`. * A few changes which (hopefully) make compilation possible on Solaris. * Lots of refactoring of the code. * Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux). Changes in 0.40 --------------- * Removed `SimpleQueue` and `PosixQueue` types. Just use `Queue` instead. * Previously if you forgot to use the :: if __name__ == '__main__': freezeSupport() ... idiom on Windows then processes could be created recursively bringing the computer to its knees. Now `RuntimeError` will be raised instead. * Some refactoring of the code. * A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed. Changes in 0.39 --------------- * One can now create one-way pipes by doing `reader, writer = Pipe(duplex=False)`. * Rewrote code for managing shared memory maps. * Added a `sharedctypes` module for creating `ctypes` objects allocated from shared memory. On Python 2.4 this requires the installation of `ctypes`. `ctypes` objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a `LocalManager`. * Rearranged documentation. * Previously the C extension caused a segfault on 64 bit machines with Python 2.5 because it used `int` instead of `Py_ssize_t` in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report. * A fix for `Pool.terminate()`. * A fix for cleanup behaviour of `Queue`. Changes in 0.38 --------------- * Have revamped the queue types. Now the queue types are `Queue`, `SimpleQueue` and (on systems which support it) `PosixQueue`. Now `Queue` should behave just like Python's normal `Queue.Queue` class except that `qsize()`, `task_done()` and `join()` are not implemented. In particular, if no maximum size was specified when the queue was created then `put()` will always succeed without blocking. 
A `SimpleQueue` instance is really just a pipe protected by a couple of locks. It has `get()`, `put()` and `empty()` methods but does not not support timeouts or non-blocking. `BufferedPipeQueue()` and `PipeQueue()` remain as deprecated aliases of `Queue()` but `BufferedPosixQueue()` has been removed. (Not sure if we really need to keep `PosixQueue()`...) * Previously the `Pool.shutdown()` method was a little dodgy -- it could block indefinitely if `map()` or `imap*()` were used and did not try to terminate workers while they were doing a task. Now there are three new methods `close()`, `terminate()` and `join()` -- `shutdown()` is retained as a deprecated alias of `terminate()`. Thanks to Gerald John M. Manipon for feature request/suggested patch to `shutdown()`. * `Pool.imap()` and `Pool.imap_unordered()` has gained a `chunksize` argument which allows the iterable to be submitted to the pool in chunks. Choosing `chunksize` appropriately makes `Pool.imap()` almost as fast as `Pool.map()` even for long iterables and cheap functions. * Previously on Windows when the cleanup code for a `LocalManager` attempts to unlink the name of the file which backs the shared memory map an exception is raised if a child process still exists which has a handle open for that mmap. This is likely to happen if a daemon process inherits a `LocalManager` instance. Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin. * `types.MethodType` is registered with `copy_reg` so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.) This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object. * On Windows `reduction.fromfd()` now returns true instances of `_socket.socket`, so there is no more need for the `_processing.falsesocket` type. Changes in 0.37 --------------- * Updated metadata and documentation because the project is now hosted at `developer.berlios.de/projects/pyprocessing`. * The `Pool.join()` method has been removed. `Pool.shutdown()` will now join the worker processes automatically. * A pool object no longer participates in a reference cycle so `Pool.shutdown()` should get called as soon as its reference count falls to zero. * On Windows if `enableLogging()` was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, now specifiying a handler type in `enableLogging()` will cause any previous handlers used by the logger to be discarded. Changes in 0.36 --------------- * In recent versions on Unix the finalizers in a manager process were never given a chance to run before `os._exit()` was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed. * The shutting down of managers has been cleaned up. * In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed. * In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed. * Previously the arguments to `processing.Semaphore()` and `processing.BoundedSemaphore()` did not have any defaults. 
The defaults should be 1 to match `threading`. Fixed. * It should now be possible for a Windows Service created by using `pywin32` to spawn processes using the `processing` package. Note that `pywin32` apparently has a bug meaning that `Py_Finalize()` is never called when the service exits so functions registered with `atexit` never get a chance to run. Therefore it is advisable to explicitly call `sys.exitfunc()` or `atexit._run_exitfuncs()` at the end of `ServiceFramework.DoSvcRun()`. Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report. * Added `getLogger()` and `enableLogging()` to support logging. Changes in 0.35 --------------- * By default processes are no longer be stoppable using the `stop()` method: one must call `setStoppable(True)` before `start()` in order to use the `stop()` method. (Note that `terminate()` will work regardless of whether the process is marked as being "stoppable".) The reason for this is that on Windows getting `stop()` to work involves starting a new console for the child process and installing a signal handler for the `SIGBREAK` signal. This unfortunately means that Ctrl-Break cannot not be used to kill all processes of the program. * Added `setStoppable()` and `getStoppable()` methods -- see above. * Added `BufferedQueue`/`BufferedPipeQueue`/`BufferedPosixQueue`. Putting an object on a buffered queue will always succeed without blocking (just like with `Queue.Queue` if no maximum size is specified). This makes them potentially safer than the normal queue types provided by `processing` which have finite capacity and may cause deadlocks if they fill. `test/test_worker.py` has been updated to use `BufferedQueue` for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock. * Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of `sem_timedwait()`. This means that `Condition.wait()` and `Queue.get()` should now work with timeouts on Mac OS X. * Added a `callback` argument to `Pool.apply_async()`. * Added `test/test_httpserverpool.py` which runs a pool of http servers which share a single listening socket. * Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket. * Fixed bug in the iterator returned by `Pool.imap()`. * Fixed bug in `Condition.__repr__()`. * Fixed a handle/file descriptor leak when sockets or connections are unpickled. Changes in 0.34 --------------- * Although version 0.33 the C extension would compile on Mac OSX trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the `ImportError` exception was silently swallowed. This is now fixed by using the `NO_SEM_TIMED` macro. Unfortunately this means that some methods like `Condition.wait()` and `Queue.get()` will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing. * Added a `terminate()` method to process objects which is more forceful than `stop()`. * Fixed bug in the cleanup function registered with `atexit` which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. 
Thanks to Dominique Wahli for report and testing. * Added `test/test_workers.py` which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another. * Added `processing.Pool()` which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the `map()` builtin. This is still *experimental* and undocumented --- see `test/test_pool.py` for example usage. Changes in 0.33 --------------- * Added a `recvbytes_into()` method for receiving byte data into objects with the writable buffer interface. Also renamed the `_recv_string()` and `_send_string()` methods of connection objects to `recvbytes()` and `sendbytes()`. * Some optimizations for the transferring of large blocks of data using connection objects. * On Unix `os.sysconf()` is now used by default to determine whether to compile in support for posix semaphores or posix message queues. By using the `NO_SEM_TIMED` and `NO_MQ_TIMED` macros (see `INSTALL.txt`) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions `sem_timedwait()` or `mq_timedreceive()` and `mq_timesend()`. * `gettimeofday()` is now used instead of `clock_gettime()` making compilation of the C extension (hopefully) possible on Mac OSX. No modificaton of `setup.py` should be necessary. Thanks to Michele Bertoldi for report and proposed patch. * `cpuCount()` function added which returns the number of CPUs in the system. * Bugfixes to `PosixQueue` class. Changes in 0.32 --------------- * Refactored and simplified `_nonforking` module -- info about `sys.modules` of parent process is no longer passed on to child process. Also `pkgutil` is no longer used. * Allocated space from an mmap used by `LocalManager` will now be recycled. * Better tests for `LocalManager`. * Fixed bug in `managers.py` concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report. * Added a `freezeSupport()` function. Calling this at the appropriate point in the main module is necessary when freezing a multiprocess program to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) Changes in 0.31 --------------- * Fixed one line bug in `localmanager.py` which caused shared memory maps not to be resized properly. * Added tests for shared values/structs/arrays to `test/test_processing`. Changes in 0.30 ---------------- * Process objects now support the complete API of thread objects. In particular `isAlive()`, `isDaemon()`, `setDaemon()` have been added and `join()` now supports the `timeout` paramater. There are also new methods `stop()`, `getPid()` and `getExitCode()`. * Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores. * Added support for sharing simple objects between processes by using a shared memory map and the `struct` or `array` modules. * An `activeChildren()` function has been added to `processing` which returns a list of the child processes which are still alive. * A `Pipe()` function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent. * socket objects etc are now picklable and can be transferred between processes. 
(Requires compilation of the `_processing` extension.) * Subclasses of `managers.BaseManager` no longer automatically spawn a child process when an instance is created: the `start()` method must be called explicitly. * On Windows child processes are now spawned using `subprocess`. * On Windows the Python 2.5 version of `pkgutil` is now used for loading modules by the `_nonforking` module. On Python 2.4 this version of `pkgutil` (which uses the standard Python licence) is included in `processing.compat`. * The arguments to the functions in `processing.connection` have changed slightly. * Connection objects now have a `poll()` method which tests whether there is any data available for reading. * The `test/py2exedemo` folder shows how to get `py2exe` to create a Windows executable from a program using the `processing` package. * More tests. * Bugfixes. * Rearrangement of various stuff. Changes in 0.21 --------------- * By default a proxy is now only able to access those methods of its referent which have been explicitly exposed. * The `connection` sub-package now supports digest authentication. * Process objects are now given randomly generated 'inheritable' authentication keys. * A manager process will now only accept connections from processes using the same authentication key. * Previously `get_module()` from `_nonforking.py` was seriously messed up (though it generally worked). It is a lot saner now. * Python 2.4 or higher is now required. Changes in 0.20 --------------- * The `doc` folder contains HTML documentation. * `test` is now a subpackage. Running `processing.test.main()` will run test scripts using both processes and threads. * `nonforking.py` has been renamed `_nonforking.py`. `manager.py` has been renamed `manager.py`. `connection.py` has become a sub-package `connection` * `Listener` and `Client` have been removed from `processing`, but still exist in `processing.connection`. * The package is now *probably* compatible with versions of Python earlier than 2.4. * `set` is no longer a type supported by the default manager type. * Many more changes. Changes in 0.12 --------------- * Fixed bug where the arguments to `processing.Manager()` were passed on to `processing.manager.DefaultManager()` in the wrong order. * `processing.dummy` is now a subpackage of `processing` instead of a module. * Rearranged package so that the `test` folder, `README.txt` and `CHANGES.txt` are copied when the package is installed. Changes in 0.11 --------------- * Fixed bug on windows when the full path of `nonforking.py` contains a space. * On unix there is no longer a need to make the arguments to the constructor of `Process` be picklable or for and instance of a subclass of `Process` to be picklable when you call the start method. * On unix proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to `Process`. (This will never be possible on windows.) uqfoundation-multiprocess-b3457a5/_misc/doc/INSTALL.txt000066400000000000000000000032561455552142400230340ustar00rootroot00000000000000.. default-role:: literal ============================ Installation of processing ============================ Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the `ctypes` package (which comes automatically with Python 2.5). 
Windows binary builds for Python 2.4 and Python 2.5 are available at http://pyprocessing.berlios.de or http://pypi.python.org/pypi/processing Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:: python setup.py install It should not be necessary to do any editing of `setup.py` if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the `macros` dictionary or `libraries` list. The section to modify reads :: else: macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=1 ) libraries = ['rt'] More details can be found in the comments in `setup.py`. Note that if you use `HAVE_SEM_OPEN=0` then support for posix semaphores will not been compiled in, and then many of the functions in the `processing` namespace like `Lock()`, `Queue()` or will not be available. However, one can still create a manager using `manager = processing.Manager()` and then do `lock = manager.Lock()` etc. Running tests ------------- To run the test scripts using Python 2.5 do :: python -m processing.tests and on Python 2.4 do :: python -c "from processing.tests import main; main()" This will run a number of test scripts using both processes and threads. uqfoundation-multiprocess-b3457a5/_misc/doc/THANKS.txt000066400000000000000000000007231455552142400227120ustar00rootroot00000000000000======== Thanks ======== Thanks to everyone who has offered bug reports, patches, suggestions: Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright. Sorry if I have forgotten anyone.uqfoundation-multiprocess-b3457a5/docs/000077500000000000000000000000001455552142400202505ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/docs/Makefile000066400000000000000000000010461455552142400217110ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = multiprocess SOURCEDIR = source BUILDDIR = build # Internal variables ALLSPHINXOPTS = $(SPHINXOPTS) $(SOURCEDIR) # Put it first so that "make" without argument is like "make help". 
help: @echo "Please use \`make html' to generate standalone HTML files" .PHONY: help clean html Makefile clean: -rm -rf $(BUILDDIR) html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR) uqfoundation-multiprocess-b3457a5/docs/requirements.txt000066400000000000000000000015461455552142400235420ustar00rootroot00000000000000# Packages required to build docs # dependencies pinned as: # https://github.com/readthedocs/readthedocs.org/blob/4dd655eaa5a36aa2cb9eed3e98961419536f99e8/requirements/docs.txt alabaster==0.7.13 babel==2.12.1 certifi==2023.7.22 charset-normalizer==3.2.0 click==8.1.6 colorama==0.4.6 docutils==0.18.1 idna==3.4 imagesize==1.4.1 jinja2==3.1.3 livereload==2.6.3 markdown-it-py==3.0.0 markupsafe==2.1.3 mdit-py-plugins==0.4.0 mdurl==0.1.2 myst-parser==2.0.0 packaging==23.1 pygments==2.16.1 pyyaml==6.0.1 readthedocs-sphinx-search==0.3.2 requests==2.31.0 six==1.16.0 snowballstemmer==2.2.0 sphinx==6.2.1 sphinx-autobuild==2021.3.14 sphinx-copybutton==0.5.2 sphinx-design==0.5.0 sphinx-hoverxref==1.3.0 sphinx-intl==2.1.0 sphinx-multiproject==1.0.0rc1 sphinx-notfound-page==0.8.3 sphinx-prompt==1.6.0 sphinx-rtd-theme==1.2.2 sphinx-tabs==3.4.1 tornado==6.3.3 urllib3==2.0.7 uqfoundation-multiprocess-b3457a5/docs/source/000077500000000000000000000000001455552142400215505ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/docs/source/_static/000077500000000000000000000000001455552142400231765ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/docs/source/_static/css/000077500000000000000000000000001455552142400237665ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/docs/source/_static/css/custom.css000066400000000000000000000001251455552142400260100ustar00rootroot00000000000000div.sphinxsidebar { height: 100%; /* 100vh */ overflow: auto; /* overflow-y */ } uqfoundation-multiprocess-b3457a5/docs/source/conf.py000066400000000000000000000174421455552142400230570ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # multiprocess documentation build configuration file, created by # sphinx-quickstart on Tue Aug 8 06:50:58 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os from datetime import datetime #import sys # sys.path.insert(0, os.path.abspath('../../scripts')) # Import the project import multiprocess # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig', 'sphinx.ext.napoleon'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. 
master_doc = 'index' # General information about the project. project = 'multiprocess' year = datetime.now().year copyright = '%d, The Uncertainty Quantification Foundation' % year author = 'Mike McKerns' # extension config github_project_url = "https://github.com/uqfoundation/multiprocess" autoclass_content = 'both' autodoc_default_options = { 'members': True, 'undoc-members': True, 'private-members': True, 'special-members': True, 'show-inheritance': True, 'imported-members': True, 'exclude-members': ( '__dict__,' '__slots__,' '__weakref__,' '__module__,' '_abc_impl,' '__init__,' '__annotations__,' '__dataclass_fields__,' ) } autodoc_typehints = 'description' autodoc_typehints_format = 'short' napoleon_include_private_with_doc = False napoleon_include_special_with_doc = True napoleon_use_ivar = True napoleon_use_param = True # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = multiprocess.__version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # Configure how the modules, functions, etc names look add_module_names = False modindex_common_prefix = ['multiprocess.'] # -- Options for HTML output ---------------------------------------------- # on_rtd is whether we are on readthedocs.io on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # if not on_rtd: html_theme = 'alabaster' #'bizstyle' html_css_files = ['css/custom.css',] #import sphinx_rtd_theme #html_theme = 'sphinx_rtd_theme' #html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] else: html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'github_user': 'uqfoundation', 'github_repo': 'multiprocess', 'github_button': False, 'github_banner': True, 'travis_button': True, 'codecov_button': True, 'donate_url': 'http://uqfoundation.org/pages/donate.html', 'gratipay_user': False, # username 'extra_nav_links': {'Module Index': 'py-modindex.html'}, # 'show_related': True, # 'globaltoc_collapse': True, 'globaltoc_maxdepth': 4, 'show_powered_by': False } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. 
# # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars if on_rtd: toc_style = 'localtoc.html', # display the toctree else: toc_style = 'globaltoc.html', # collapse the toctree html_sidebars = { '**': [ 'about.html', 'donate.html', 'searchbox.html', # 'navigation.html', toc_style, # defined above 'relations.html', # needs 'show_related':True option to display ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'multiprocessdoc' # Logo for sidebar html_logo = 'pathos.png' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'multiprocess.tex', 'multiprocess Documentation', 'Mike McKerns', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'multiprocess', 'multiprocess Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'multiprocess', 'multiprocess Documentation', author, 'multiprocess', 'Better multiprocessing and multithreading in python.', 'Miscellaneous'), ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/3/': None} # {'python': {'https://docs.python.org/': None}, # 'mystic': {'https://mystic.readthedocs.io/en/latest/', None}, # 'pathos': {'https://pathos.readthedocs.io/en/latest/', None}, # 'pox': {'https://pox.readthedocs.io/en/latest/', None}, # 'dill': {'https://dill.readthedocs.io/en/latest/', None}, # 'ppft': {'https://ppft.readthedocs.io/en/latest/', None}, # 'klepto': {'https://klepto.readthedocs.io/en/latest/', None}, # 'pyina': {'https://pyina.readthedocs.io/en/latest/', None}, # } uqfoundation-multiprocess-b3457a5/docs/source/dummy.rst000066400000000000000000000002741455552142400234400ustar00rootroot00000000000000multiprocess.dummy module documentation ======================================= connection module ----------------- .. automodule:: multiprocess.dummy.connection .. :exclude-members: + uqfoundation-multiprocess-b3457a5/docs/source/index.rst000066400000000000000000000005121455552142400234070ustar00rootroot00000000000000.. multiprocess documentation master file multiprocess package documentation ================================== .. toctree:: :hidden: :maxdepth: 2 self multiprocess .. automodule:: multiprocess .. 
:exclude-members: + Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` uqfoundation-multiprocess-b3457a5/docs/source/multiprocess.rst000066400000000000000000000043321455552142400250350ustar00rootroot00000000000000multiprocess module documentation ================================= connection module ----------------- .. automodule:: multiprocess.connection .. :exclude-members: + context module -------------- .. automodule:: multiprocess.context .. :exclude-members: + dummy module ------------ .. toctree:: :titlesonly: :maxdepth: 2 dummy .. automodule:: multiprocess.dummy .. :exclude-members: + forkserver module ----------------- .. automodule:: multiprocess.forkserver .. :exclude-members: + heap module ----------- .. automodule:: multiprocess.heap .. :exclude-members: + managers module --------------- .. automodule:: multiprocess.managers .. :exclude-members: + pool module ----------- .. automodule:: multiprocess.pool .. :exclude-members: + popen_fork module ----------------- .. automodule:: multiprocess.popen_fork .. :exclude-members: + popen_forkserver module ----------------------- .. automodule:: multiprocess.popen_forkserver .. :exclude-members: + popen_spawn_posix module ------------------------ .. automodule:: multiprocess.popen_spawn_posix .. :exclude-members: + .. popen_spawn_win32 module .. ------------------------ .. .. automodule:: multiprocess.popen_spawn_win32 .. :exclude-members: + process module -------------- .. automodule:: multiprocess.process .. :exclude-members: + queues module ------------- .. automodule:: multiprocess.queues .. :exclude-members: + reduction module ---------------- .. automodule:: multiprocess.reduction .. :exclude-members: + resource_sharer module ---------------------- .. automodule:: multiprocess.resource_sharer .. :exclude-members: + resource_tracker module ----------------------- .. automodule:: multiprocess.resource_tracker .. :exclude-members: + shared_memory module -------------------- .. automodule:: multiprocess.shared_memory .. :exclude-members: + sharedctypes module ------------------- .. automodule:: multiprocess.sharedctypes .. :exclude-members: + spawn module ------------ .. automodule:: multiprocess.spawn .. :exclude-members: + synchronize module ------------------ .. automodule:: multiprocess.synchronize .. :exclude-members: + util module ----------- .. automodule:: multiprocess.util .. :exclude-members: +
uqfoundation-multiprocess-b3457a5/docs/source/pathos.png [binary PNG image data (project logo) omitted]
›ïso³:yè@RK=$h$<ã:dƒ‡~ÝJ˜e¡<ç{Ó›Þ¤N 6 #<DAb˜Õ!+H’y¶ûÖoض}US|TFŠ»ÐìHRâ5ÂWßCÒ!|@h ¨wr<;XŠŽ!¤ÞëN¢‚hQE¡-¹ñ›6¡þ4½ä¸eÑ‚AI €ýH¹Š×%RÂÄ|é—$AG×Ee‰Èƒ‚8ÚÓÔh]v™uk¹¡ƒ˜"ŠË…¼dÊÏŽWb$ÓôŠÔ3åÌôä"@nñÌwÀopZîù–P•s•jÿõ/mÖ_¿W+y8“„†ªFŠô+Æç˜IÒ’ WKÚ´y‹Oï™PQfõuuª°Ð›¥²£ô`Ð޼ Lš‹ñb#Ãa:qÄ’p0£bQ¾©Œ ø»¤!ð-J›Äéñ„Ìxƒ N—0•†ìåÇÄ[hAzJ=T.ôÒK§üÀèx‡É™ÚdY©uÖ€#¶OÖ4wkà‚¦M’‚0°å È0€…DÉ\IÖ=׉f,I¤¡#Í,xnÜ€TºÌ.~Á%Ú(v’VuŠT¦µ(afƒ™`”~ÒqÇ{ê#ÖÉðÞ¥5z{ÕIÜõž÷ ˆüòç?—Ê¼Òæ-Xdë5ˆž‘e6ÁfŒÝ23EˆÑcÎ!gÎ"ƒs ¨°’Û–eÿóê#a¥•æØ–)X^ÿäCuGýëV•ü§ƒóÀHEF`ËæÇìgÂåzÇ{ñĸx’dàáØ&àø ‚+’$qúÄvˆÆ›[4NËkU¦%²½.]¾Ô®¸òÕòÇbâ|_ô!–vÞýÐxÀ”-Ÿûí¹ö§*©¦cý¢1§Ðb–€!j7ÐG.¨Ñð;Oƒï==ÓÚ¢¤™4†À fÏ„§Ï°7üLÄ'œkò©¤W¶À‘l—$CÀ †rb™ó„†>€„Yh›?ðÛ}ãŸ=c ¹âá3ªu1‹ª¬lÉ\MŸ`…¨Ž]{­gë.ëÙ¯Ñ3 F朘D §GR(â‚XÂô4þB?ò䤔¿oºDÇÑþoù "2Œ Q!-vÍ>gwk¾a·$¨Eóçk¤N+G`P¸9ºä60µR!ô̓{r‘ùIÇ¥¿H"…ÅïÁOú9M(‡ò‹$é£aÑâŶNƒ^쿹fÝŸ3Ê [¹ìn­Ê’¬Q-¡ÔWxèI Þ’Xâ(OŒ—WnÆlèéÃÆç\×´Ÿtœø Ãñ@„/ÑPbN¾ˆé¹°°ØZ5àˆ½ü‚gŸ¯)NËíò+_Ú4ù§œŠšNIè>ônkûõº >°wÀ9’»J¹ ,êt¢3õ »zuÈÐÎ=êÈš/Oh°P“à8•4xŽ÷sÞý ›õ¶¸È ð­ÂW%•æOšŠ¼‘ó‘Qïª(ÉP#xÛ}¿ÐÂóVÃ Ò ‹VæÙ”Wœmµoz§.8K”BÙ àEì‚0ˆ¸sƒíýÉMÖt÷ï­å¾íÚæF…¦$Ê(¢¯”G_ŠÍ½ã{Rhâˆ=ˤç.°¹ÿüa+©?‹×òË´¤~ûê—¾j÷ÞwŸF·Z‰z²¥2°Sƒ=Õ›P¾8#(Èÿ<@ò(ñ† L#ÆÑ=«::…•kʨûlTh¨A v‘ñð¯hý]A¾A?9q»:,ÉŽ&æ;õ€} ”?+: ´ˆËëOrÄI0Ð9œ—|‹i„CÊdÀ)#>ê/{(»î ¢ïÝטÉ“›ëëê5u@€¹ÌΗ:6»/ÿhܨäŠ/–Ï|?é¸"]([žŸ!~@¶Õ 6hFÀz[§yˆ=ö¨ò¤9¯â Ö¦Ô4¤óy½ `ó-½þÅ'Ð%•g¯hMeAdxAôôç”$oîôà|¡0± a @'xÔ/~à/… ¼Ò!©µ±Eö“ÿØîÐ<ÈÕ’fHzœ))Àí¤ £: „¬;8ƒëYŸÅˆ"r¼0HšÉH/ z4„P @¿ŒKV’ <. j™û¤L|B<Fùá= I… [&Æ}l›[|)\¾À©—ýüaÏi’ÚN?óL;ïYÏÿ©Ñ©±Çü{¢#ü6”/ms„§pL½ùÓÿèƒ.¯Àò»»v¸ të™U*š†SªÝ‹°BÊðç4 ì4DÕÕc]yB¤§<à™oÁQið ï©?LS¼†xà/B…kŒ"¦Ã—Ø¡°Cã%Ù¤ÀÄ0$„ .í‡xãïÉ@ñgTm`Pµ=wî—g[¦wÄ H&ÉäÀrÖ žó{ýßhÝ kmÝ»Þn­îø…¶ ^@M˜¸K²Î…7\÷éYàl‘|ЙBKµóÑ™ µõbUœñ41j±•<ý"+Ö€PÇš¬W‹3š~ýsk¾ûAëÒh¹ã’ÒQq0Àð½ŽY¸óû¶ñŸRxe€ï^B[ϓΟk ¾ø-+˜P™Q…`Z¤BÒ÷Ä@8\]ò“ü&ˆK4oØl{®ÿ†¼ëÖ¶æ Ž„`Ö(­ŸBû;îqµïºÌf¾ãvËõ7k5Â*»÷Þ?Ë6’ïóƒØ`RH1¶(k4€%8Å©ÛÈŒ0RšÉð“ ’IÀ óE  é@¦{I¼ç}Æéà6úï´‹ß“ê¥ü_¤ ¾yžñŒ#¿PBïS@Šˆ‚¿ žú•Bqî @H⨯‡Þá ß# "¤ Ý\ ’vOŸÛ';]“NjdÒ< Ž)KL´f©Üùç?Û–¯8Ýãퟛ¥J¯ÔT§Z­nÁ^ p³ÃO±€¯S€>[«Œ(= œé7P z 5A/h‹ t€WèpBùW¼#¥†žúÞãQÌÌx&´¤“qúê>Ò7ÐQ9Qšÿhf¤ IDAT‘ÏHãÈ'߉“kˆãð ésü„ô‡˜®¼Oƒ$yA‰ª6›Çòs…G™¤Î{±&·¦¥É>ÛÖÞáuËŒŒ¥šnvîžÇm×—nózÌ[äJ CKTÅCHAØ}¦‹¾)J+¬-·êó—[ù3tŠä…2ßLšÚ•|!¯c‹DH¸ »ƒÉÖü»;mË|Úº7j3ê=€NU½ôÛŸ±‰ç¾ÀÛdlt­×í§Á[üÍ ’|Œ`ÆùÂV¾êbkWï§¹‰9ô[ñ¼z[úÅÿ‘DWáá"PÏHéw¬üƒíúÁ÷ìà/î³Î}áÄÅ(BSÞ(jOzÁ[òÕïj4±Ïþ×Ç?a¿½û·ºï´óÎ>;a² @p‘鏿rQ­‘ÒL†ßHîažèBœ<â;®iÿé÷éûÀŒ!®„‰wOƒdš¾1þ˜×Èìiÿé´¸é9HR‡bÖtº!®¡ ©J§º½±;hÌ PêÌ`Ãk”Ë Q‡Ô¶ƒb‚üâ…‹í—¾Ð.¸ðÚùå ƒ…鲑×C¹°†šÚßù®=¦©:wÿînט?Ê@µ =ê4ÿÖÇ=ÍTµªÇ'‚N¤iFpÃtÄÃ÷´K‡Mó~"oà'íbÙqá‡w‘?Cø'æ7W¸tü1žø.æƒ+aჴ] Ä z­º¡Ó$š~”»D«š*ÅŲÖvÕ5÷?÷¹ö¡«¯¶õï|­íÿÙjIv\£S Äâ Êó­ìì9VuöÓlò¥/·ÒEOYÑu £Å¶~ë[w)|¨ñŒt*&Úmû~ö-]ºUÆßõJEß•8M„ëÌ×½Þ·¥ºþûßרd£î{mìN0sp!ÏGœ—0¸‡Î0bâñ”ºÄFH¡Ó÷9‰ Úဌ; Þóž ÄGåÙ&jEQ•¤ÉFI˜«W¯´æV1ð–öú7¼Ñ“‰ èS2Tç°ôáœKgAš´ÍÙ-7þÀ·{àü ²6MÈ?kÅ ÙÉZ|•€2ÒÐù­3ÂáR AÔ¦‡8Âf¿âá~ܨOï@u¥ógÒ!†N•Ž5غñÇ"æè²A ]ƒd—¼áÍÖøó÷¹¤29@Ц˜æJg”YÕsÏ´ çžoÕ—^a…ÅåN=%û¤Ý@éD›ó¯Ÿ·‚ª«mû—´\ŽÅ,ǽ‹´¶{ìàŸeUç^©îCs09 ù¨2à¨v«¹å{ºGƒa4H³ð“WKžæ"¸¤åÚêÁ,<ñU_ER¦‚ä!O‡N]üZ›òÂW[óý°-WÔZW79ˆ×\qŽ•Ÿÿm>Ñ`ë4·U5å©eã/‡Žò@\ºÒ=¶<Å~"èM±“=Ñg|A’ÝêA#hîKèº;dËóòUÚ˜‰ï>üˆ-?}…wt¬Îp~Ðäh¨ƒêëÐgÔV¾oÔ`ÌÍ7Ý,à]cÛ¶iªSÍÑfLkâ òŠò°ŠÈóF^7j" ó‹,3ÀnV9Ý`g0ø™ü¥¹gðˉt:¹¡åŽuméˆâDò`£LI¢£Ÿ'.𢉏}R ‰ÉªYz½Û¶ù^‚{Õk^k“_÷lkú®Ž}ÕsÁÄ|«~éy6õ¥¯²òsž#ó™V¸Ñqùj9QÜIxàÉГ(0©Í|ûG´ëK‹mÿ¯_;PÓã0Û1HÔø£[Hª, cB‰³AÒéê©·ôîž ›•B[ËC:lG‘‘$…>몗JL¾Èý0`Á¨õÑl>‘J*ç-#¡1îAÀ[R,UE`Y)â.¿é›úЧûŒú™¯ýÛùó_ø}ZeÃäÖ@ŽAu{E±‘µ‰†sâ3~NB>É—‘.Ћ¿Ðqe™AZ"‰ Ò›÷0u­—)%úúA6Ã?H5¦Äåkƒ%£íCÓˆÏ4^â»YKL¿ó?ßöÙ Û¶ØTuˆgh—#æ,z£óQëýŸµj™ãÜ–ìù’ðå঩o«Ö«Üƒm?𑼰©Ÿ¡Ü˜|ˆQ¹Q¡Ù1¥kµ¶ãJâ£W(žUfµoû ¢Þ@§ÄQX>•f*™aÞjt‰<—‹ÀhuÏ»^í—_¢­þ÷µ¶èšµÒ3/´uZ*Æ6g›¶n±É’$ØÌÁ+ž00€_•]ôÌ™¡ÑèÅrF<—ôø‡ª¸FyTÁhDÐèî€PètÔ_‹| 6¶ ª)^5@v'zè‘GìΟÞ1„Iñ çrŒb)²OÚÂc‰i»$—³´Y­$ÈÀ÷AVpÀUÚÁó äѺÜzôá6ãí:]‚TpuðÓ WÔ ¡|ÁwꪛQnÝ–¶ °çïúõ]’òW[Ïô¹¶ø[ß°ºO\kE3æ%EÅ,'tftkeF݇öúä a„À ƒ<õWÖ 'kæ‚d8Tg_þ¬kwS{OÝû`LävI]R.'G:êöv% —o= 
k¬·#HbØA›iW¾D+0¤®@™-q‹)§"2·Q’$ßýšˆCd©a¿&µ÷Êà:ù’+(=ôà#¾¦ ž®¹oqêà1TªÞÇsl8Tü A<2Ä“É÷ÉšÉEÅ ºòçÏú[,G”_uÏRÊ~5®šŒ¾Fª2Kݨk:Aæ+Rg8_Úy½+þ‡xÐw¼!ι³k·æßF|H>`s}ºüÓ°aQ}r—\ü>7ñ÷hhýžˆWïE'>ÑÜ¥Cê:óúüh'† J±ùzÐáÑ&q|÷«j€å˜ìV4GõS¢õýå — '¨Ï€´ghuŽÌaÜ#%yL#ÿqì¨ûÊEÓW3Û¦^þ<ÜmX–´Sð…?ÔlÊ뛕tÎÖÀ(”fG{ï€tس³Á%0™]lL®ºª pâ5»ì ¹Ò‘ ÈvTmMñPè­? ã™'ÞiÓÆl߯ŽéyŠ‹qrvÍáÎlÚµ>9˜LNÇŽ<»„#f¤.]ZuPja,~iü§@Rî\ÞwÉOu˜…ÈÐÅú 摤}(’(KZm¨[<(á9:™z¦}ñ­ää0!?t ÇÂÁ#Ža¾)´$¹d[´N幎rÔ^õ_zH ÌhÂ92˜$ÝŸG,㪘Ãê„es„²'¨Ä%ÓµÖ÷™T¡¤}nwÀtΑʼb>–«Uà ڠ€Ígq±Êã•©ÆÀ2ôˆ±Q Æ (O©(³eEèÄ_lPÁ)€lÓy0H΃ñ‰Æ4Ù¡‚éC”„£át¨qê…Ž¡Ð” ûp: EòðE‡¹„Á9ù¯Xƒ5ІÍÙåY…'„#Òxƒý óKH3Æ5~ €FHlÄŒÍ5Ù;ïÈ;µ§]•(9BOÀ®»;Œp;@êz»Àáõz.BmG³÷ä¡x)ähô~äô’_:gð©wÏ~ØÍª.^lÕϵc¨‰:Îwǰ8ž$ªªp5ŒG#T=ïbï(J¾l aD*ôæ¾Ã/T<†ŽãhŒœÿÁAî& ‹ÊÓMÔCˆ"Q*Jg-¾Ïn¸.Ũ‡¥RÇ]n D…FÐZ³mšÛ  € ’ Ì ˜ŠO¸&Qb×÷l¨Ð-;ãn-UÜâ_QuG·c.vhïGŽ`EMÆQOHÿ§+Žýià(½N%ÙœÊuš'èÀŸ×›À‘54—ѬUBè\DP÷€Ä‚H@U’¡¤G·U Q·½ÝAûäFÅf¤{6É®S¯°1øñ3îÅØ±(²¼ºïà>ëx|¼›ÿ‘Oë#ëÌ5èL™å»$`µçt¶r‚$„òW‹u&.÷5/û+¨é¾§q¯Çá£Bº# òäJ§6Š÷qÚ©Ú_ÍÀ+)4À2üQ–8XÁ1؈‘*Zoü®® J=øO3Z:üÉrŸ DUfhæ)i€ìPgÕªilKæö-P¢ry#‘(&Ôâ!i¬˜Jö7î÷U8Œ®:ߥˆJ:œ,Ø ån¬ÇF’ô:uà#šÐ ƒ• ^ vg—„äÇ%ñ€×¯üž .».ÓÏÜSGlÇ™HØ#PrÐÛé_ô£Óé–}ÑOtDòtJˆc¿I@2šQ\¡µ×‰êFí©GaØV3ÆÃœÖcQð8ÂŵMi½»¿«U¦yŸ¹ÚŠ§Ï“VÎìòÅ0dIy,s¹¬¡—à%ô¤cOy”ø9 ©4:]pÆ•Ûö¯ý̺vnÒ¡¶ÚßM#U8"¨È÷ÙŒîžFñ‡Jܧ³[¥Ò+µ 6¢>ŽoÑI#GuãšþKg…÷ìèãÜß–x}!ñŒtMùN%mÒ ,–#}ÐpXaá{6j›Ææg¢è{®p„w Boý—/o@œM}PuÉôž…Úí<½>|µòñÇ|wp™*ÑüÈ·àÑ㡞2ÒjR×øq Ôµ€ r §§wë!?¡ŒÜ<Ž2årЀ´°6éHŠIÿØ&¡eÜ—{é‘ºŽ’¡/]„æ(ž@øå–*Å‘4©Gsy¬ëfAÀ±p®å*!ÒC@’ ÈöÝt§Õ~ä-6IK"}£ eÒ±Šv­Å)aηàNåÍiÏ ’…E¥JBêª8+Oâ2 àµoúgíÄs—µ?x_f÷ À£WCà Ÿ3Êì`%ÆkG/A%ú´õhçêN™¥'‹ŽJ†i"ãpàßÇFÃÄk: ÷> ÄzS8á$vttÙt BØs@lko ÒˆVÍDÛTô˜&¡q9½’zÑ…¸ùŠÁÀݧ©=Ö­³Å‹µƒ<+ÕpQ}è˜Pµ9ƒš1bÝyø¤h¼Äìuªîñ“)¶&±H¨s¨Üu˜ŽWžNhË 1?Pg^ÆîIMÚmž³Žº$õÓŽœ€BR?üNW¦ÿñÌ'Æ(†º(¼e*Ä V+²-«±O¢m+‡f€’(fÛ¿}jþʦ^ùvϵM|¦Cr$ ìÀ»l€$@Nd®c·ôù"IVêGDŸþiùB¯ÿðûl÷O~äb¹Oؘr˜¹3¶È£Ž¹Saš[Û]’¡À¤é¢¾Wã`êA •J{Jƒ›7I 0BdoØ NÏés+uÛ #çÅ Qì)‘`âý`ʹïH'ª…¹}ŒÞ[X5.¿"VŸÛ¨HËúÑ‹@‡*©S=û5ÿP¶£|^·ê\†y_P®I¿¨+Lç5XOeuPÕÔ;³Ñ®«ØR¹±í„c#BüNuŸþ“X¸øÛHf§;_‘F÷ëÖ]:.—N—³±ó5–•oÃúõ¾Þ{–6Ë$Óõ@9Ä„ÎA!æõ8Hg@êånß³Õz5ªž×ÜFs¶Ù¢xóKTVþtÐW¡v´Ï›2SghJ›Ó 5”YÐ Éup`‰¸ÇÚ¹¤|„DÀ¯¤ßpÚQVhÈ4,Tk¤Çý²ërG«$HlŠGˆ÷ Ÿ¡¨"ýpÚ“nòúà? 
ò$*<}¬œ«úÑaÈÖµê+«oÕ/ù+O^Í=Ô!)Çñ4Ñű˜ø¯ƒ>â®ÙÎg‰!½q±£†ð´·¿Ð*/¾Ìš¸W*Œœ¯Íþ!xŽ÷$‘`é8GñžyPœM‚ê`Ñ[eŽòÌ='TW’p¨KÕfR©É&âÖU ì1zˆöÌ–L\ QõØ<£WW€&àLZjκ¹sˆŒ5Ф8dTÀR>j¦ò2€~þ‡ÊGYyݺu³õìÞc-Z?-.§®a:+¹ZÒÒ\gE5› a¿•èØÞîJ˜UXâ ÒíX‰4uó7¥Ã½×Cr¯G²ê…rž-ûnݺUê_»Ž‚ÐDqÕôÝ´q£ÔNçÔq±èÓRùuy¯•œþ‚á ˆ•ûÄÍ›6Xw£v»×&)ÔV±üBÊí²¨:‡þíb³¯Íz7Ëîž·ZvxM}Ó¹çsëÜ4ô‚ÅÑ5³w5™);üÌ ªc•ÃéMÇ/"ލÖŠì¥GT`êgÄÎIÂŽD¿]üÅDs˱¼t¾8§ö3ÖÎ1HékbõLšn“^ütï|‘½³_ ÇåI;éÉUx/4L¯zÛþb«}ßǬ[’\q•“!ÕÜÏјKÅJ@NÆç—Ä”Îð ×½Ûu Õbˆß"O„s)…^޲éÏí.€$÷>â…4b¢L˜8o™?V”$.a 0–\îxÛ2S'(É# °ÿá­mO³UH*rõÈ¿÷*”˜Ÿ{Êæ” o^ÝØn ;l•6þÜ!ðrÚBwèê¹êÆÿ‡kƒû@]ébÇ2Åý’tÖ­[ëÇ>2?À‹cm9ã;HðdB.^ˆCÎ z®z§en2ÉÛÖÖ©S: ”9Ó\J#uÙ:ø ë:Žwÿ&›ÒÓnÛrôÄа„mËr¤xñ¿<‡ùa¶ÁAÙ÷Kµ>¨9¨‹4‡Æ5ª.S½ºñ:Ñ'O€ÁY3´+eLu”ïƨ¦}ˆÈ¼ÓSFkª˜]ïí¯>m‘Õ1Ãt9A2-v¢ÛÃl2¡TT~Ô¥rV)hbçé•6œ?{ã,kC©Î•Ææ½É§:q瀨J‚yx…½4Šºª@PéÜP-¿4ngÒ$hAGð$-€²D ‡TéH8ÒÊ8.5Ó}Â+4ÞÑfÊP´£ü¥Ô&@ZÊ—ÄÛÛ°Ù ÷µZµÊ…”HOÿ“w®g*¨Þà@TR.¼H¥žÞƒº-ÚªPÒ9ʼnÈcãš8ƒ®‰WWÔêFè´yÃF;ã̳2tãèÚXÕ„ ¡¹hL#Æ««¿Ožýüo¥}ðᇭ³©ÕJȳ¾acÃAîhPä[Ÿý_’o¼™.ô™Ú¼eÒš&ë­Ùa Õ³x=¦Îù&á¿C%”ö0BO$Ñh5ƒ$MêpHzd@ŒlÀ8Td£õ^tŒõ¯®¶X‡Á-_ÆRæcäÔFóuä-»…åõˆÏâ éIfÇÊp\NŒÀ@²ãÃЈ$rlyTà½4=ÇÅ “Q¡ö–¸Ã„HEr' í^•L@¡²èìT«Ð=;:uÀÖÊk¶$LÞ‘Ì¡ÆÓe~Y¨ÅNž !, Ì@§Ù™e…öŸíÚ¾Ž)äƒpe¸‹T ñHس¹Bc|0›ÙNýUí–IoRÁ±¡Íž6ÕAºc„9iL§V¨óâóµgeMs—óëT½£¬tñÚú×Z”W:@:$Š͇ é£éËÔ°ªéIä–mÞfÛ«kõ)õñ¬_x'Íw´ujP¦ÍiبN¥¹¥Uª¶ìŽâùãéʵC<æ“3ubæ1sÒìM|ÃsΣ¥rˆ–ÀfN /'‡I¤ ¦ù [¡ùN<Š¿[/8á¬ó"•8†R¶ꓱ–‹c­nà suh|¥–"2XÀ»)@†š$BMÝUpD üÁîb6'bŠŠ„xhÁ4b ?‚/!à»Àœƒi#aF©Õ©ÒÁ2DqŒ†1óIÊžÅ@{³ÈΨf.™Ÿ‚Ën£+g3BãǦÝAí…²7WIJ¬8w®åéØU6×PÓ§8$Ç;M|tPá^¢:ذz»ýtó§YÊ~˜¦¡Ó tMHëñPOÍuå ç;wxD‡Ó ›)RdŸF`]òTnÉ1õë઴cGF1cøgîÛn+šeI/-·üieVT*ˆ:‘©¯GØÁ`U§õ´J3Pfvª ÷X©_йóB-W›Ú¨øŠ'jÑ‚Ê>±·Íšµêñt”“?x°³‹UNšk*»_Óf; Õše¡ðãñv´:,–‘Öή=fÙqí þXúEÚ¸zuúÖ‘`Ó¡ARDfúi&í•8…J[œ¯ÑñöÉà!iÊKІi q ãð™T]me’àtî3 ŒFå¥òê@(¦wfý`@26&W?iÅüWeêâåŒYQ0ZÿUܼ®RâQÏ0"q!‘² ù`¥ÊÏñËÐÐ)Ѐ—Kõ‡XGA6)ïÀ(²aâçÙy%6O÷Sþb‰66‘äX]¥“ì 8Í2yRWݯ:€çþóO«íÎu›í ¦“…Øãî£ fÐ34ôø½_¶Nh‹ ­©±ÉfÍže[¶l– ÞèfØ€‰ƒ’«çG¹òœ‰ŸÄUÖ‡§Í±ÍµuvFaÕ§ÁùEëé–ÿ"ý ¨|ežÐu†$Ùú?mÓúã~kTF*ŽÅj[¥3Jm`b‰©Ã€&S5ó£¥sºö88ÊÁ‘é2Ëñ°¡q1€–Ûžãôô]‰ÖiŠÞßvØÃEåv^a¯Ö/Õ[ñzý“%Ë$_¡zšôùU¹5š<þµw[a “’Ãñ$Ú±”¬SÊ´ft¸Z- »IÓy|ÄZëå1ÿ<•õT­ÝšæêÌõgŸÿ@: h“tâšv“Ÿ¯Y>gC8¥«óÛ0 •$#`äŒKö©˜P eÄ_!BøOΠ£õÒ{$I¹‹/²Ù³gÛ¶m[Ý>èƒ74n¹ŽšIa„7€Ámm.=©xæ)ðF2‚ ÏÛ²Ç~â¼@¢W½ê0èɱSvê†.P–i€Ç핉þDàHEoC–âÓ°¯.ýsÎÕI¼Ž$-·É2ç …JN Í^¦C™ª/]j…Sj4ûKJx‚®Äý™NB¿Š'Ïþ´³É¾ð»‡l·ÀŒ°Ï£‡8ò—+}ÎéÚq‘žž5=°Ä‘9“lÄ˼¾-›7kw ›R]©ÊS*JŒŽ.†Ï9é“'÷.ØÍh[¿ŸõÚ£š¾tQ~·M“ê5Oê*ûJs²»bãØJëÌ9Ï ¢ò à%9òðäçJB9Ȧø#1üÍà»ø ¾Š+eöi'àÈyæ¬zŠ4O‚>%.œVйlÙR›7ž×Ç1˘°aØâ]Ä©áç"Æ0üÇ1=Eçœ{®ýôŽŸˆ§‹­]R‰˜†á/Žb;ñN<ÿDp …ˆaР].pôïòCC¤ÑºwÝ3MŠo¤ÍºVæ2í¢S YVЦ1•%{y)’ØÅæäfÄ.¶qÅÉ?W÷lHJ3gz„íY²»Õœ1Íò$-±ôKßüå«|\w¶÷Ø'ïºßÛ©y•ä[ŒÞÀ“wÙÝ=úOèd<[ú¡|Ñyl Ïätæ£Òq5è8Žf1+EJ”/\Hß!ýC¥E½Dy)@8üîXÞ a©¢;_Ã8Ú3[¶t·° Å|Ijt»g™æ ûÎÚ=RѧþÊb…HÉ”Ç?²hðD€ŒåŠ|I§Ëˆu£Ví•ù álhŒ™ç©èæéÜûŋمÏ}žg‘Ž åÂÓ‰ñ{B‚$¤eÎ&#ÕK—žæ«5þøÇ?ÊV#¡*#ªÖ]-ÄG*a¾´” {ÃÓ WÔ0\°NÎ3?1¾A$>‡p$îÓŽPÕ”,þg7 ×Úæ2ŽÍ2#YªÇ‹RIÅÆá1’¹Qt¢„r)ÉP4+ˆ¾J×:IDE3j¬D+LPØ|m•$P~<Œ®ÿ}¿TëGÖh › Z (œ®ŠÃs©wNÛø<$߃àè~ôÍãöò)€Ú9MÔØ<á´%,K4Û¶y«ƒB±¦o0/•z .s_<ኙ콌”t…z^Ÿt+»lC^™=[Õr©àt`uAçË öŽ˜ëøzœ¢8$I:©Çðr„¿ä1tðo°»²"†å|-©Þ«éQ{÷79X"=>U4™>mšÍœ9ÓN?ý ›:}𗇲¡‚³c؉æNXdG‘ü‚"»âµ¯µ•+Wú`I§ÖÃ`ι‘{Å|¨áÙjµ7v¼þ"8RyÔtÊ ‚¢^†´ìtIµÿŽ¿,é=Ù|–}ûJ%š¨á4~ì•HÆ.•јGÉ‘½ ô8L(§!ƒЃ{]Ÿ%(¨¹ N#·åY£ÖI0#ŸÇö´ÏþöAÛ®ALYõÞãÍd5È„€æ QˆAp$­è|!¥9éÚSU|g•OýÂ…º3í¹Û%˲âR¯?÷£x<×,ç@˜¼ £ßzˆÞ¼Îb¨<ëR~Ý]l«Õp/Îï´Ép@’é]¨ýÒÃÝЯ8ú¥¦³m  Kvù±iäi€ô@Ùg˜A‚ŒÒc«lðÒSÙ•kIgÝüù¶bÅéöâ—¿\|”Ûdruöë§ðó ’ØýˆQ1R¿&.]¾ÜÔ=÷Üc²eˆ´#Ž£«î35…„GkvsÕ8Hš9ÀQÞOoüøOÂ誷ÖX³ '¤"†—?&^ûÈ»À&ÏH—ÉdtÔoÀ2ô´éHbüÿ:$dEÅ»"M™ó8ú”h·'l~tìr¬T¡ÜŸúí#öÛu[$YÅFId¡ðÐ Já!ÁW &y§]Ú ¨ûä]$£kzx泟¥±¥V­´Yësþ*´W(i1Ȇ”îgz«Ôà¡\ž€Oí>tü†d 'ÇŽ â :Ô}v}_™UÒoÏìïxú&: ˆ30—¯<ø)¡*¦Éމsz(“t Léñ áéo”í‘ób|íó¡‹<&yn¤ÔÑLI‘s4XsÞyçe2½³N¦)7òãèÿ„Ézy’$‘z^}啲I¶ùÙÛ«W=îI…q£ÿîh$.eŠñ£Ú¬¶æ ?*ò”_¨.EÆ÷ˆA¡™…‹Çíù0Ô%¯Óñ’>vJ®~X–ì•¥²-!QúÔ¡D²dŽe„†ÆzÔO4ª„ 
Â8!µêýÙšQ}aVMfÈ·^J¦µ{w4Úgî~ÐöJÅsé†@Q·Î¤.ÿŠ/æÑý%ßb’Л{œ€ß„ÈŸ{ÎÓŽãå’$ùzm¦uѱTiIˆ+¨ NO§­~—Nß_yÞ9š{^ª ÐÄçª^ýªoù*wì÷våÛú‚ »$¯Í¦K¤Æì „t 2¯¨ÀФ&öéÀ±n·5$ ç¸êRÒŽÜF)F£]:ÓÈô‘ïÄð¨ åŸâCÉäE¦ª“–GîbZÑChÌH@! °GÃïÑ #–>¿’Qp5’Íûdp÷o !@‹ Bï—²RNJã=¸2‘W\hÓ†åjwJ§˜Ý} zþ´æ<Þµf£lƒÉ¤jON”pb¤ÓV¼zÇÚ\ÿ–”-ãCé†0! ?F¿x ¯ïD°éÚå·_»!M1‰³DfD›zÐI¹Ü³~¼Œ$’qøa5˜Êέ®^øñä0ªÚ0} ›#y8 qñF«´s zíýZ±!;šìÆ€T¾€ÓíàÐÓÓJ§—I8G^¿eßù¦¶‰Ý±Yk«™Îúu_JØ8F)>;äSï¹X³"Õ×Û¢…‹í‚‹.ÒÄý"ñ®Úœ*Àw ‡†âçÑ é ^ÌʲCìH¸¹ófÛ’eËl÷ž=vÿý÷¹ZOÚs¦c;[«Ñ nz£ÑopúÆwý¥{í$DÒ¨³…Â&Ái·Ñy¼’ù¯àÄŸ`t¸ÌaLLF—ÔH ŒèÂx¨àü9¢ÇDŽæªô¸•€O¡4…0³à£Zç|VOòIáì>h_úýöSê‡9¹µbù’¢;д -ý™¼¤¾“– ùÝ…“'ý_ºd‰ûݰv­4ƒö`~P<ž âô(=!q†/‚ŸÔ8½R’°1ƒz·€“t´Ø`¬qþ iIGª\+©òÒ‚V›%IÓ%mØ ˆž¾¹š?.„óÛaý8™ÒçFVˈö$EújÕÁ‰ä¨–™3fØŒY³ì<Í8™­¶˜§òÑÁüöÞ¾®ã:÷€h$A°€½줺d«Ø²ÕÜ«,«Z¶“8vîužã”çÔû’—vï/N|Sü’Ü”ßÙV±)–mÉE½Z’UI±÷ÞH ¼ïÿÍžs6ÑH‘t‡ÄÙ{Ïž=u­oÖ̬YC=CO•Ú‘×!)ܼ[N…S^Ë$®Ò+<² W®»á†°rÙ’°´~XØ'û„2RcæKmâÅ3Üß ¶"p©!!^ÿƒ»¸Rø½ù0 gvôlæ%8}n…v&c0ü"³èVœ‘*m5Z«™µbž!¨ I±›•pÒ%ZfOü`8Æá•e•Eu#4òAbÚ§,ušÍ}ÖêžMÿmƒ‡†ÿõÜòðÐÒÕ>”«]Vs jƒ…3H È\8ê'&ž³~›±WÆ,#ò1pCu¹s”YE]qåU>·hÉ’eÞž8DF,e1‚´, Å›ÕA+®Ñ/~å7¼ïƒïQ+™Ð˜üÊ i²I2%…H÷ø/\x–$²¶ðèc² .°a5Òí¥w‘ â/TìKw <#ÆÈº9Ò>ö3ì2þÔc«<G˜ÞŒš} ƒD+ß =ô)ó€ºI#UVjØ Xb<…ô#ÏÐ[qÐ(¿à'Å­„â”,LŠnZœmÕœ—Ö'$EÎ ¯ê„·ÿùÝŸ†Z¹öy4ªqDl?ÕÿX:1+iJƒ—¸Âs|ä9¾Š/P¹Iu–…Ðcz§#xª–´üñëoÔ³ÛUþ…èô¡_q8¬öVÐß4øgiwWÉ/]IÓ –ïÇCÈ•tl/¶Th®rHx_Å¡0Y•¹G+7 ƒ‘ÒÓð1‚`,7õÍŸ÷µ#*¯äM«ÖØwÜ¡"wH)é‘öŽ5ÒÌaAT&Œššf†w_q…rW–Òk­–m‰ò@i A¢ˆüÑO^/‹2GÃæ­[Ã’Å‹EœØy¡c˘:#G|<¬E{ÅÛþºÖرä 2²k^r,¤B¼ú#o–Xt.†@°)æÕƒ"ö,#øG€‹ŠéCUFÀ’£*±¸C׬ÿ|Ÿwä©Á°­]‡>iqè€æ¼öîß6JRý£ýÍá™õ[¤gª]?5C[âV$07–pÄñ]סhúÐÏ¹Ô ÏÜðW’€ƒ¦w1O¬ŠR'O>ú˜çD9{#Ç|Ï"—…aw%„Ž.„øðÌŸë‘RÐÒ0Ù§Ç\º†3´;ëå^-lÝÝ>8,T}ïR/xD 8DÇbS:éÛÔÄi´à’Óþx¤x”å™w,H²ÉZNóŽÇTš<ÆK²Is‘óçÏ Î:æÐºËÌ)ò+[¤>"3DF4Q¯(¯zÏ5a݆ ¶ ³QG•¢'™@ Æeñ¤(=F&pø—œÃ'ª·§ÞZ(2çyèà/̉é+F«qxÉÜ&qˆÇäJ‚ç"@Æ n”79`» ó­$ à,ò$³lù!8e†Y“30fL‰TÊ7¨ìÓJ)Ë~1i«¤ìÞ“:M£/{™iîµæäX©¤Èük+å »kw¡\J")|Ð4cº%³Å‹…—^~Ñ‹$΋êGGFQW©:¿-þ$ÀJ><§¿ä××IW:…Ü!Á’æ«)¿¬¸«Î;4Ç­y) $_Ño$G:¯t¶LRgþq¯:)¤ÇrwõÒF¦CøØ]só­·ªÔŸ›´Ü‹WÈYƒdbˆ–iŽr´¤’‹.º(lظ!ìÒY&d|´F«º¢^³W»EõI3@‚€8…‰àÜbF²‰g=[ÚŠ_ø7I‹Šçž8Â8]'Æïpú)$ÁØHKWœ•! ÇœCp¶9"Q¢_É*8`ÉwÔáÚG"6±PS"ѯ£Ž0¡&®7H1_çô”f”8KŠ×¯¨¾ã7ºyÙ¢ B„ú¡²>¤+i9¤¼¤4ÓwÝ]S]t÷®?~‚cO9pEÉUkÜ΃ 0¶ õ¨|bDa,ba%?¯¶Åè%µWRëÙµ[çËh˜ÍP~q°?ù9ÃÐ9×bÍù\`)Ò6fÏÄŒž„<•5HÂ@¸Ž©>èů¼æê°zÕ*+$/_ºÄR& â^.„óDè…¡¥Ó;—ôÞ²R&i Ÿd(X=ŽpwzŽñ“¹J(Ù@’–ÒáüxGŒ4ÆGœHw‘ùdqH«àµ5‡uFU<Ò J`Éw0çˆp z†€ãaìu² ¯FÀ”d'Ц>,UúóNI×Ѹ~äc¾ò¬¢$eÙRàý«/ GáÒÕ}üP¯¸Tǽ}£/$¦¥p±UCfU±Ú¸¨-°u‘é®8O‘h£F:86½Ö´Ç™f©Ç™=ÁŸQ#µX3#œÞyáú›nq,‰ÊO0Ê3ö³²É´XÃ;’…gIT×¼ÿ½ÚÞ¶E ºûÂz »a’(ÈöEÏI2„1 â_Õ\ ’„É$< ¤_7¤‡µ  >D"äÊwÄ£ÿwÀ.?=¤Â˜y3É IÍ Ð%Rº•J€ýê(c‘E©j1'`É”Âí­fÞkóíZ‘ù,ÕÁ©rJÊŒ„ Õ{HNýH¶Ô\uGdÀÔ[úÔa«5÷¸àœ³µÚ»Ï+¿H̨@ R½ûŸÊ™U®£K —®xæï{K3ewßGÎ+gªol£25ÑÆÜ-Ïښخ}ÝqŽ1B<Ô=CoèøcgÕjI~h¯Y`y"R|oåx3ßÕÉÁ,m=ûì³Âû?ðÁ(€¨¥Ð‡ˆ®lA²')’ù"¥†¡Ó¦M_rIغu›Õ-Øî™Qtcè0 ä1-DOü\PÒK‚qÞ9Íèaèó½Þé_š—ÒÛÌùË,…ч…!¶¾+$éŽÇ'.~îÛÈÀ%^²wýˆ˜Så­’02 H"Á ÍœçlªLä‰¹Ô ¡$ó˜Ô,¿É务0Yᨽ;ºQ€_6ÈÈ.j1Tt­€_•£ÿb<={ BŸ¹ÅäËcþ>¥ÙÓ5Z¡íñܘÿM ˆJTÀ(DwXÕ+áL/Y9ÜAÐ~I:T”&Á>Í=”¾§Û­§Ì”™?ü0iü„0eòd[á=z´y~ g¡0É•-HöÔn+€8D‡Ù_óÞ÷…Õ«×øpûÕkt¤¨†ªH(8†±H¦VóÎsLÈ8yKÃD0FTç d ^E@(œ¢.’w1,UÈøQŸ™Å XâdI”ô¥øÜΙC™ÄÈþ^Lh.€LÙK×8Õ ,À£êÏL“UH¾^¨‡SYæÌšéºYªiæPë•êäô%µ'iS_ª(wêI“úÃÑùûR¿ôLûáÜŽ2€K;,®@Padüzï°’&¹z.›•ºw;8º0¤õ ÕQas $¥Ýب]5³5Ì>?\uõÕn7üa)Vú¢p ™i¯v0üË?ücø¿~ë·ÂG?ú±°UV®·lÝb)¡iAðÁ°Hq42’Œh¦ƒ ÅzÁ$bøC¼šsÙCv‰lËkîºÈȨ€ „°é÷Ÿ‹ž1¯ ¹ó¦€HdzòÖ¢y°£bú7ÛL,Ú¥B}¦º4‹.ÞãuÙeï°ô±tÉ’°ióÆP§9VÞ2Ô¥-¤ãv ¾({Þw ¹'LÞg?À¹A€ÃÇØaß!! 
3I‘6')ê™ïF§ìk1´#ëRú”‰ÊsÁoVg•¯Ÿ“yÏ0€œ;wNxß>äzc^šúKDþþd¦ýfÆ5 A’†Z©E›Ÿ>òp8ûõxW]m;…Ûe°ôµW^ ûµÊsBüúï!ͼŒOÀ’ŸÇÒ£™¦ÈETë®ùÌÕ@dc¤3[?ÒQ:º#}oÉ…üò§x`Jü Ýa»’¡a>‡ŽæMú!€ùöô…çëTk–â#°Åz὚×Âê +Â6F¬oãwð(‚k”ÆâÞe¦ƒˆõ‘1IZ„ˆ`l“¤¹:?ÞE@ÏYÕmEÌrÔu¼ã©‹‹µ./‰‹þ¥á:mšHþ˜$‘“&M —½óaÜø±¶‘0„6u„É%°LÏá: A’¹î¹û.“÷ÝwÞ®¾æšpýÍ7‡Ã¬8jÎÞàæì¬—$éx> æ̓?˜ ‹C­È80çá E.*Ò@ä^ê/2¯¥U3LØO€,ÄCÔY<Ä‘Ý'€t~H- oi)¾s¦Ö8ùïˆÚḧÑë™ÆP=¸Î]ézŒ’bEÊšî>£$j€C¾è$¶ÈPmGTª´îq» ³—~̵‰ÁÑÏÑ3½LSYI_Ê;Úsüçhí—¿KíS6nÜfÌhÒöYŽ“‹Ž²4 ˜ )ÆXµzµaÎmþïö§áwÿÂûßÿ¾°MG–bVmÍÚ5¶dž&š“4Â\”‡×â†^À“ n€Q*%ªèr%Pd–ÈB\Y<¤n€dÁ ÅJþR8ªçýô’÷€ú딿£RaH‹4‰d£Ö©—˜$k–Ng G¡j_ó0ŒË:„£Ä§Tò¬ xÏ_’­›ˆ'IU(²°âÍc=†©'ÚˆøëØî´kÖ´T}ιÉy™œÌ3&pÜ©6ÅšÎvýí÷00–šÒ%uâúS]XêV§Év:ë¿dúã’œ½Üªñß§—ºÆ÷±ÝÓtµ!²Îü' ›û¸ o±mŠ¥ø+¯¸BRäí#xØ£ŽiÙò¥Ú¤q Ð&8Úˆöh®,A2Ql ŽHÅ¥¬Y«¤?{áy=kÞÉ Œðþhøù /„o}ëÛÖƒ\0^¸ì’KÃXñœt…¦ÖsŒÌ¥ƒ<@Z?ŽÄ`,3—ðáËÞ’Rðcèˆú>åWWî j(NA?EV±ç"c¹°t®ÏËÎa+³ª²Zûëe2lÏn­pïÃâ|d*íÊA‹¦PØj‰%íš2ArÜ©ïØúp2Ìöªµ™5V#¸m°€Lj$ìÒý©oþ%©]Z¡X¤`ßö í 8ÕpM”ac”PÂÁD­fs®ýåï¼ÜG¤|õÏÿ<¼ôê"…씽ϭ2 £ÝDÚÝuXðᨛ¶hC¹ÊvN•ˆ’aLDá‹YvÊü?ï3&ìt¥qï¹ç;á]š—¼î¦›ÃP-ì‚Éhlͱ ¿yxm<‹ˆfý8ˆ¾@ø‘Y2~°·nÕ|0 ±aÂÝÑÇ4ÈW!îù£S ¥ëP"Z¤ —Y>£w–¸$a•£+Åd:¼ :c{¯Ž- œÔw’ô ©á5 1!»‰hËaÍ?ЬœÓ!º}2ýA ´[‚ÛM‘}j7h‡DÉÑ#ÍEd)ÃR¹×V¾!Øä*™«¢±,P&‚œX„‹Œöwó7"xUøð‡Ã¹:xŠæ'iܰ ±3\ƒ ,3 Œ¢Z3fRü)œ{H“pü§ºgUxsþDRp)߆;§,Z‚tÞ؉(.%ÈvK3¤ÒF¹šWž·„@•~µ¤šy:#§œÜì¦Îî:í«G:¬—qá¼ .hÏÞñžùF†ÕÌCÒ0³x ’©¾¨ê ÈJ2J€´ õ™µ¶„ IDAT ãù)½Â_æÍ;èÌíH{p£äȾù4ß Cg,‘§Iš¯É:m°Ì~† FH6Í Ÿøäu"¯Žðñ}PÙ¾BIPÂçÐ2ÔíÐÁ D€¤\e ’:zŒÉAÄôà»l6!“ˆâG ÀI/k%õëýךŸ<>ú‰OhÁ,K•Äã0úÆs“\FxPQjˆñˆ+"ðŠ€ ¦º‚¤°[ I9 Î;;ž“¹QZ(ê1*+4×UEúúgÙ˜o”>érDÅ {ýà†ð‹ÕõY"åq¹\«£HÌ;vl÷q¨šà(W\˜9ìó_Vo×|§vQ!Ez1‰ pÔލˆÆJbyÞ±D‚´.HÿŽGÕc†—Ã@ë¶, «Œ€£¶Lú 6†Ø¦=Ùî”.æÁle}ΠšpÓÐáám‹¬°å䨻‰Ú›}þùçkgÍÜШé¨?û£? /¿öZ×bˆþ˜ú@²gd HÐ] ŸÊ«s%ÈÏ}Ð@0ÁQ$xìáGLÀF7… Kô ˜£á›wÞ¡ù¯áá3Ÿû¥°`Á<[ÎyþùçµgoT@MŒÃ@*I'$oPs¤¸‚¢üá`B1 s†Üó½Á11 _ØŽ‡:˜_9¾—+ ±õlç¿Sçf £ÔQŒRlj¯ë¬ ¯Î‹ÛýÝ™þCé¾ð¥_ÓAXÃÆ ë=·5\’0R9Ù,Ü+; Ì{Œ,ÄDUž¬¡€D‚xN]щˆDƒì®pY½òirxùƒ›ÙÓCÓ1à0ò-@™Muø™¶å[ÚCtÄÜ(ªFXýaË#ŽzD9a¤¾÷Qfz˜®C×6,_’ÔÕ–£Op†ü4 «³gÏÖ^ç„ët–=î¦>iC®³\>yÞ¶}›%IN¶Dá6u’Fd¹àe}K[–­Kê SçŽ`gq»Ž”M¢¿Ï·ˆÔpÉÏW–_vé ¦I¥¡£³*ÜxË-ᜳÎSdо&¶eÆŠ @2ÿØ®¸â) OÐ>ÔùóæZ¢äÀ­®q¾°8Åe†‹;zªª#¨Á|ŒšÉSLG‘+\ü‘Ÿþ•¤£ËBxh(¦4ðÆŠGÃÌÖ£a°â”å1gƒ(G >¤Á\ùcBemµâ!ì+I’aë§?û ¡½õ`X¼tYظy“‡ÔÞ1#IÒfƺc:0¥>o/аPW“-¨X_Q éá³ÂPaJ· ©wzï:\Cß8:ME¥ÆB“ÅAäá^@"­ÌµÐ¨rHuúvR§Ž]•ÄX™ÁOæ;ÂEÕá÷†Ž CeîÍv£FŒ¬d_ yÈ[t WûÑÊð¿÷;]vqQ¢¬ ÙEšfÇ i¨®,A"‡¸‘&q0Cjæ"!d3ˆü"CÅ÷]üÔâ ›8D Û¿úÚ_ÚÒÌÍŸút¸øÒKÂü¹óÂpI”Ìü™äE!€œâfxcyˆ-ÆÃ‘$CvÞ˜  ?4»H}à äÍô”4µ Q«¸f 9+[¬íü%bE"ÁPnôˆè¯E´gº£3›:uJ¨¤&ÃhªGW¬‡«»t|»eJA‡2ÅöSøì¤íj·Ž¯Ð\&ÃÅ(Íg©h©~5•® «U¯©ê  º)ò%Àªº×têF€D‚lÑb½b‰$5ø6p<ÅFÝŽXÉ¢¸˜&'ÉŠûŸê”÷7Ë18VçÍ=+¼ûª«]î_ÿÒ—ÂÊÕkœ%ÊŸ´—þÐ3«ú­švÀxqû3—ÊúæµNÊÁ \MÀ¹ï p(ó°Ê»‚T'Oî!v† E” aÍú ᣚŸüÏüPÝvï #ˆ“­WEݨƒ¥ `4H3'¥ÿqØí/b6äWö+}ÂëBf/cÀ¸8Òéý -.Õ©(@ÁÁ"EÞXQ†^2YÆ!Šý\Ë‘®åv9¢ ³Z:W[ù†Njê‡ÚŸÒVªZ4Wv&;$ó©]MŸ25\üŽw€nÒ‚Ù>¨`ŸðmK5²ÑmÊœà^=/Y²DG–η.%{µ9Cœ!uµÚžsq¸2² ÎiOw4Œžãœ$í•2=Ëp¤ø¯0¸ø)¿¢µô…‚¸%Hi?duZÃKÚ³'צwއ º©W:ûÕîÓ+´z/?t[ë@ ^ÒQÆk¡d«:äÓé0S7K{³ßvÑ…ZÍŽ'ÞvÛ7ºd!ÖjòŠõŸbáØqƒFÓÔó@te ’4HjD}޵™1L¾ÁhRœ%: K“ÌUJëwjnò¬sÎ Ÿý¥_ ÏZh)rÑâEa³æÍ`Ä´J|,,`f‚xgÆ­Ø €“InôĶûe%ÌÕäfi¦ë$ïÆ4da˜ü§&…63Wt µM”aÑÙ‹¸ÛÊÃꨛ1£G{ÞkÞ‚ùaøˆ‘¶ÎCÙ¾ðÅ_Õ9D«ÂÖM›Ãô¦&ÕJU†§žz*L7NÃÁqaÅŠåᥗ_²në.m;¥YFr§¤xÌ¢´‘ýòÀ˜Ý,‹àè m–Õñ¤¸h·£Úl€-ÊZÁÅ(°æ}iŸÞœwa)RŸ© l@’”l&êiš‰tjFׄê1Cà íp™¿¼SGŒœ>¬£0Z1O •ïEÃC´ü[¿õaé²å®ÛT¾ŒRÓcîkí¤­ª#t[áA:®æÊ$½Â-FÁ1'²iãƒfw$L“â³`è wŽù‹¿úËð ¿üËR¦½! 
Z/¸x¨S³¶aá’„a)R‘ÁfX€‰’ ïù«ûO@=ü‚4M^½$ß’´4S\£ÉpQ@:Ù!ð¸Fë¤ÃÎÒÔ¿k<¬Sº8Å]ÏT7B‹}K}~ìÚk½¸Åê1°B™?ð§CǶV©\×gºz„ùÎQùöy×­¯ ¨ôÔ½ëSíÚ8Lq¾Ð=Žß<8âG»²ÈèýGl“&ËBî„ Ô‹kWh·ä¸e0RCëa3BÍÍ#–*hFCTÞÒâßér¬ØOÒ"変ÃW]%‹ã“ÿýëÿ=ü°kÓõqLfÈ%u]èââ æf™Ù~§$îÿ’ÇTÜ›ëÁpɺ@›„›¶l6ÁEòï9o–è+A9ZŽÈã€ôóÞ{Õ•áÇ=>øÑjgÁz`X×¶ÆŠæUJ7îÕŽ –BÊ -A¿îH>­žRaHò´W…˜fRiȃþ¾¡4jÆŽò*º_ • 9é[[ê¹Øoê›jS7¨ÛÔiû!œ{”QòÏf€¸cƒk•Ô¹hWKßă$µHð?ÿÇŸÛªü¸±clGÒ ÷®Of2)Ñ# ‰Y”‰ Ó¯þôD|ï‹ôG@ ’¡äa=#)ÑõÇyzEiGbçÊßPý ÖÀé’ å…÷D»zÔ3Ôã©uJp¼Î§™9³)œîyá"YÂBOø¶¿-˯êFùb>•¼EáŽ?ž£¯t&úÉÌñsôî@te+IÂd€$Î’ž™¯Â¿è+´ø£ ±Ã3ݹח- ¿ú…/„¿ÿç ×~òZ©9pmeX.vYà`°tüÏ0tuï|˜¯Š¤&®-0/«Ø8Þ He†Ú¬¡žÖÎ=…sXU¼Ð_À«x`1@¦’4te˜w&:˜n$ó_³çh è ÀX!@Dú&×Ì/ã¸Zç0k@G'P¯…²Ý)Þ´ichyåeÏ…ÙT™À÷˜EÅí)€0‹ÇPEr<ãÈ z÷8báœE;$Êd~Íûñã&W8ržZÄéê¡Bª@jéXz–pé¶Û$ËÓáè¬æÍ[ yȋ R÷©ÑÜï/ʾ*;œâtQì\ØŸ‘•K8F)ÒÙUâ3 7 ³á»üN·ÓQ–Ó•F¤ÀÓ•ÚILǪ7"~èß 8jTæFL™PgwòŒsˆÇf&1ðÔ÷øaø«¯~-Œ?)|XVPfÍl S¦I¾a%€„ùÀ,¤D®V:'qÒ'=ýšU1=»A¼J+OÞ²è“$HžÉ_³@r—¬N V¹ØŒ*ô$qí‡YÊÕ4/§ô­ñ©wG¿N‡S¾)à—@ˆkþw–pUfìFÒy4ÍhÒÁu*´¤CâXQnW`.ßþNut`y•ââ@·ÿö‡ÿ-\tÁ’†fÉ_«ÜÒ5d¥;©dAy%pçEq8 Ÿþ ¶®÷0ÕŠ•+Âkšÿ‚aoþÌg<ŸÃP tBÙ'HU"±B† 1-C?¤› Ê­âê )¿Å+B³˜õá!Ž+Jbe¸Íµ}µNyT^PÚE ì4¡HfìB§þ&m¿Œà­œ©®ø3“iåVãJ—c"HMt8èBRy@du?ÀvÄ¥÷øÑ^ ,S©Ð«D:‚I«$Mzÿ<@™˳øøžº7€â—ý‘Ç<@bê “gX‡J£Bç•íåJ»¨à&‡¸pC‹")ŠÞäÏ_J›N#H;¡²¦*üËⵡEõr*]½ŽÄX0A8ÿ¼óÂǯû¤“úÍ/9l‘°AÌ+Éõ$)Ú‹6K»Ý’÷@º4sI"£±`\7Zj¼c®±y!Ž.×C« Ùnƒ¬gÊÎòøÌç>š¦ÏƒµJ;X§ûA^ /}ð0“7þ>dÌW24‡ÍŒIZᥜI5#XbÐĸ%2½óž@ø Ã|$Cq†…±<¼<½.Öw”ÊHÃH°¥8—½2´ ‘ôKÏpŸ çâÜfª ŠãÉ¥Œ÷É¡!Àsz‡?Ûé8â´iÆ ßŽÄãùIf ó”Ù³Uèž«ãU]ö#Aî—Ù"éœ]5QŽ”vm’òÑÛµXø2dçiõx¸vGU(Ï•u:xŽ¢¿$Êï­–*Ó)tÔ»œ¦i+èÕïyo@ëŸÿéŸÂ“O=©¶BÒ†–˜ Šm掼ˆ—}æÌåôh¨Ï e ¬A2I\Y-búCØ–ú „ôš+R î TƒH‹ ï³¥h~Å»¯ Ó§G&58H@|ÞÛ-ÆNÖDG–ÛÔšâ°Õù‘ßþ4F _—MºGscÿ:DöoíÐA¡fB]¨®¯ mZ¼ˆ-eêãé?O“Ó“æ¨O€ß Vû(A2?Èê)¨Ð xüÊ+¯:]@ †Lm˜2“À1IŽ\js¥×®YùéCÞÔ4ÓCnâÎj…õ ©¿Ho¬jSEäÉqŸÚs¯þXÍ>"ƒÍ>.Nx¨F]{õþCýëÏÈZ¼Äo彬1, ®“ •¿A’‚;k…ßyü«õù‰¿ü8Ìë¬giƒÄ9áœóÎ /ýüÅðo|£ŠtÞthI3@ý‡HÉ—zèÞ¹ɽŒ#ŠDÅÝSξÅ.¼ KsÀ<0–‡ÚŸR¦+-V$‚R߮Ϧwy¥k"˜Æ=­Þ}ìúÂŒY/…V)šs\í.íD‹XYN´@ïŒ#O]¤G‚)®Dg„Š!³¹J1«v¡?#‰ç2 í/l¨ ƒjÕd²v]¥rKµDé|é>öœä›òÃܬFÓL-(•w†ÂÌ v*¿Û4¼[/epJ¡í²6¤žR›òžû|{®_»6|ó¶oXwñÊk®–T4[[l¯½þ“–’ÿÇŸþqX¼xq”S~©så7«ú8E=¸\}d‚0Ð]ú¾àeùº¬%I˜ À’*ÙFc»âo—Á‹ETP£â,5dAýʾñ'űGI$'¥‚~’’Pιõ³ŸµÞ$çë,_ºÔFc¡L%qƒ#~ú"¥“îcNT½g’´øƒðbÂñ=GüñêMá7æNïk&©¤Úሑ¡}§æýR¹b ÞÜ_TCPÌf(Æ +ÑÒöK—. 
õÚšØ*éï–OZ™d—F$ÖùœÓ¾¡SƒÂlÛº5lÒÖäCG›NPÜZ¤×6Ôë4…eZ‚:¨qäá¤}ÎæÑÄþ!‹E¥ÎÌÎתÿ~»l´ÿšAGòR­m7^13TÓþlI’Œ&î}}ø d±ãíwJýÈ´Ã$©úL“ŠÔ5ï¹:Œ3NG&ï üèÁGVG…U\ê ú)³#½ÂôpÃÂ&õ/¥i©‚–­wYƒdªuvn0´«ÑQžkÍ 1äR˜8²ˆ¸gÐ*X´÷^1Vœ0+CÈ$½ Ý£à¼kÚb@ÌFíÒ=a-˜áb||c½ÂŒ I‰ÿä¹+@2GDhÅ¡²’€ÍóW—®í=¾z‰vž( Qqe¥—aí©]'%Oýwä™=½íRÿapBÊ{éå—­`Îñ H<ʶ˳Qßh0²¨…s{É@íÝwÝ¥ùLÍᩎß}ÕÕ¡]‹!›µ˜†„:XJÒÌ9òM…¾'-¶¢Î!bšøzHÓH¸ž.-FVߪÍX©¥ï»{Ö7îD…,Åf”Q.ÐQ £šF„A¬Êúá–~ù¾'Âsë6:‰î¢;~ÐÁäI5¹0\xÑáü .4-P'á§·DDL±$úu}ô8¾£¬^DTGK<ߌ߲I€¦L:Õ@0=(*!½¶/„­°w>\iÚb,'I=è`€yi‡{…úÖÏþbd$Ì•+%åìãì—‰$F\!u=w¥2¢™väøC½øÙ–á½÷ï WN~ó|é JІ9êôw¦F9¨#þ*P¦˜/’lgÛ·wOhÐ\kjKÊGgÇ7jY Òà ðܳυçŸ.¬\µ2\÷±…ygîùÎw¢E‹ÃÎ;Ãı£½wŸíƒ€2Z¦Àö# âܬ3é’tJiB)Ó<¤ßg÷7]¿¨U;Ì”H[7kBèÐóÿþÐsááëNý®ÑÛ­dÏœ93œ'+ã×ßÍŸQ¿¨a€Þ’™Ñ`*£·Ff•zç‘L×"¥ Ç\̓bB_Åyþ8&p™z”-HæëÛ+tZÝD*‰ ‰Lm‘€rO™(bŒµË³@ µ+ÍfÒ ¡Ìô[ˆ"¬—*Ðõ7Üä£PÛ4ÿ…: Rމt²nœüýOF”qˆ¾ÂðÌÎ þY"•_Þµj~òÁµ›ÂC¶„I:åîm2zÑ -{$-©Îê(ãÆëØÒqaŽöqO”%vk°zÔI;â|QÝvÊ$ÜÓO=~ò“‡ZðiÒðñ×¾òWåÒ×_O?÷¬†ÙC=TÊžzNWD!pdÕÚaˆ¬þ»­hBÁIwuÝ×7"ËŽ”T{—´ž{øå°qßO7tÇIö=ªQ¦ÏæJòüpƒF78ÔÃ0†úÐ:£¸R¾H~]†Ü) ¿Š?L4E9_ªX,̉÷*«ÄUŠ º²I¾‹"Î/»ƒ¯½¾X+ß¾éà5!òÄ}Žmb˜ K',¤$A8ŽOR"l=*|êÖO{Û ï—/_æÕTÒ28+,q ¯õ$3L‹4BA£3 IDATΧº0Ü5Zw/%\rÅ<ÚÚ½ûýWòêŒx‡i¾°q´/ÄLœÆ7sÆôpÅÕט©1· @Š9Åiqd ç2xü°¬0ýL‹56naÞ]á–ÿúEäÝßþvØ(‰”zR[ci’au‹@òÂm-é_’+s®Ó^jƒ¶r˜¾æâ0 (|w º^¦ÕÖKr<Ž:­Å¡ÙÚäðaY®Âº;&ç8’Lø¥ŒŠè("Fö˜5ÊÓ×|iH"Á.å -˜ÕÖè* z º²ɼd`°T9…³SÌ:½7s(ÐMÑ•>I¢ÈBÀ7Lò³­õ¸<8Û#û!Ö‰Ú{ýM7…Cÿö°]ŒÞѹ++!dH£cQ'I§JIÀiu¡ 0‘ZítÅ ÏÙ×gúf¦aôð††0DÛÅ¡Úñ±P–€†ËÊøŸ!4sÖL©Mír4zŒ3“ôµV;›öê¬í×—, ›eeþõ¥K´o~]¡…óuô»¤ }P ¯KŠdè]/Û6o±µpæ;™‡¤­¼çZ `½Í>*ËÚ*ïw=«iJ$Ï‚HÕG:§òuFã'Œ—é³s¥`??Ìž3'KNóôºƒÖ _eŒdi,þúUü¡ƒVøþ:À8ZaB™¿ÿÚ%ýÿLWÆ )&È@‰Ž•µÙ³gG‰ ·šˆ ï³²ç"ùý‘1ÐÃcN‡!6./Å24Ä¿CéèĤ35”i¦üÐÇ?ný…_ðœ&Gm<ñØ£¶¿W ¹dÉëçÝ’<÷îßç’%¿×Ç-ú÷WŠŒÝj”òûDXˆSö;TÚódãt¡æx.\ …™fVý&¾H ^9¨Fó¸ZØ[³&ö¼ªÿô.eúóñ|®¢R n®¤m@œ_”4º ZÖ^e ’4d)@2s€¸è¸5ÎZ†6‡ç2Q`”µ‚áE¢ô2»&2áŠ*Éa é ”Ë¡ìîèÓ1‡)• IHì,aAçÁ0àr4*ŠÎ‘(ÕcC¡8ò"gð,H—ö:í?t¨5566†áÃGX:ê5jVs4”¾ä—ËxÂø0D‹'5úÛµ}[xæñ‡½¢ÿª†Á”v¢Ã ]:î{Æ®ÅDªIù‰Ç ëuæÐäɓåïx§±š¦Ê|Ý^ »üÁ÷Ã(I¸Hî¨mÜ´ÁJþÛ¶ïÐP}¿Ns|´?UMôå©þXó)µî¯(( Åo¦«S}ÓQÍž5;\§-‡¯  £Ä\`b–o·æy)$a(+íé.]£mF}k¾û’Rä dц¿”f÷_•§oY‚¤%µ|}‹jšÒ› ÑY:ƒ&$ßê(_3ôPCC0}VÏ9ã%åþvT%bcÿ‹ô(µqÅÕW‡µ2Ѐþä‹/¾ètÙKœ5 < M¨H‘pðir =!øQ2¥…ù·j eÉgÊÌš5Ëv jŸúÅï¸L5ŹڢaîsO?!ÐZçyFïtQ=%pLY§ŽqqøÁÒài¤ŠïJÕ’$MêLº/ÓÛ´ÍnÆôé:eqvøˆì$Þü©[Ã!í®Ù§¿¥‹—„WtíÊÕ«%a½îNŠ]Q»õ‡Šo½Hµ;Zû¢ÚG9W‘¾B—–æä=3Ý1EÄ9Ú“½pþ¼®j¹d («_oÍT§µ[*‡êTó€}3 TQý M”Ù}ÞùÞ{åeHyˆÔ€<ßIÎÍ•%H¦F`%š"‹c[¤ŸÑ£F…Ú…Ñ›ƒ)*Ä=…„è"3l/ĆšI=*=+C¹J¶KÊ}æ—>î¹û.å¹Z€¹:,[¾\ ç<7IZH0 ç(Èœ$ÕŸ47 ¶‡Ôñä;u±p2eÊTÍï­—\z™tߪuhÔ¼pÍûd‡PÒ$yÛ¦åþó>‡Y£³²éœ ðpMÎñ(RÏD¦ äÒÐÙ=üŽðÙaJ$È<0áG¼m²I‡óô3Ï„ï)OçH¢zוW…ó/z»Õ/y×åá÷ÝÆ+Éòpxþ…çtlÁ<Õýíen6`b WUõwÍiÖž÷¬ô³“úŒD>U¹páYáòË. 
ŸÐü,®R'Á¡h%—ï¨Z¥·‰j_o޲Ùx®õ^1Fœ_ÄÜ4J>ÍÞÒ)·wÅZ-£œ§Æ€ùòâ=Dð¶·]ËЮs] Ç÷ž_B:Ð_dÀÒÐE2á=g/szçèôÇ¥É{=0veeµÏ’nšÙî»÷>©¹ì±á‡~³ÐÁ!Ižû“Ðq†á0¨!š_l(Ö"9j8½CúœçHq¨¤p@ðúÑG?q­lAV‡ÍÖ…}4lܸ1¬Ö;òÁBe2zè9—[À‘¨å¸ž•Gê9 ¹ûʲAWb1Ϧöʾߒ~¬4g‹„øèã‡ÇŸxBÃÐYZŸ>þÉëõ×]§t…ïÞq§Ê]¦NŸaC$7m”eîmÒ:ØöH7ó°,Q(.Jê>v¨,z¼yR$s¨S´'È uÃuÒÅ…þEV6¬³×ù^}G­Ã#„M‚h¯Ô6n;—Y'´õ–ç¬z»ÄUú0JÉFµ5ž—$èÊ$M(ÇŠ#$/¬ðÀï{ÿt†ð7ÝV=i÷Ð[Âtivcx‘\|.‚& ‰¬É@DÌÿÄ…ˆÞ‰"à••ÑêL‡ò}áEo Ë¿nIqíºµa°¶ÜmÚ²Å61¡NrNDÂ\µ²†=jÄH`Æjh³;Ð)e—Æì9sU¶Ía®ÞÿX…fB]ç‡÷Þ6nX6H²¤`ªb-Q[‘#˜¹N•`HxÒNÀh£Îs©¾{»&€ì&ÆØ‘·,^žW¬\–kÑ葇çéÈ‚wÈ8òu7ÝhÃu²ÅùÊ+‹Ã‹Ú ¾jÅÊp@ÃṖµÊj:‹@üqæ6m@¼úqst-;ïO£'iNEüK.¾8üò¯üŠÏ©Ó k§.Ô?õÃè`Ù²¥–2“…öKÉwÜGÛmÚ_®‘R?gyØVJ‡;LÇCظŒâ‚¾+552\Y‚$ €žd"ZÔÚ¢|̘<õŽ‘c“žgÈ„ï§H.é!áAŽëú¼/› Ä×sÏm’4é’JªòxÞ"kÌå}÷î»ÁÖÛ}¤€Ã“¡tÌß¡xn"eE&ï”îâ<aݧ]+ì~Çå—‡MMžB`ªâU ]—j5š|mß¾=KÒnÔ·ú)é}¿ãµîù£ì Ä’mv<pH8Èw ŒÝ~Ž(¶ñsª2oãT†lO>ùDxâÉÇÃôiÓ-…}äÚë,5Ÿ«Ý)[6o <ôíèiO<ýdxÛÛ/«Õ9H4 ,ö0·Ù!Cµ¸7KФ£™ªEš:IùÓT†_ùbÜš‰}M„¨z‡~t±K3W:™Ÿ<ø€Û$µ‘©ÎŠFÝrê¡:r2ï(uÚ^2‚ðâžvV1—]ËgrÇÛÆþè ÿ)c„<¢K=WvwÔIjbHÉŠ4.1N Y|¦‘9¥óˆÈ@÷É:O —âNÏÌÔ<ÖáƒQÚ)«{ ä,•ôe×k>üT­Ìþò>îÿÏûà š/{YG4‹±·K/3Lׯ»bÒÜ=ºÖ“èšHšf4Iùz’ê ©nUFÞ£ùEæê‡óŠçweMç QkÈ„1ˆb ¥´b ¬ þYp`ÊQÞ’<[§Þ Xê ~~ß|d˜¨»,SÜg!ÂZ?>òˆ†«†Ë¯¼2¼óŠ+ÃMÚk/óÄ2f­ŒóxžNNP~f4Í”9ׇyåÁ0Õ1yJɽ;gêpD³«×®ë¦Y£àúl• š@±7€tzâæ#‡HÛ[¡:‰¾~à‹òue ’Tyw Uñ¥ÚÇЖ4Ø_ù$zZ 0`‰8â›ô^¡tËÎèâ7,Ú`°•ƒ£8—¼öø~!Ê-ˆ\ËÍÞ14RCbæÕ†K²Ù¬á=' b‹1ïªQÚUG€6zŠÞ#­òîWØIþŒ×ŒÁƒ‡˜¡çig ª:£e® ]¶4$zM*:?òpxö駬"sžLµiÅFÁe“Oö¸ï=W™ÈP¦j=î»ÿÀéè•‘Œ÷’yÂ`¬ä™Ÿ=ž•áŒoÝöp¶$Êw]qU¸ö†]×Q}ï?¾µ‹h´Ža]%É’ÝR[ÜÍR5Úc:ˆJ÷y:¾Ðç$OÃ"Í%—\>ò±ËzÒØ.`Øk:Høƒé¡ƒÒó¥#5 RÿºIÍ©[䢺cý!7÷îêÔCƒµhÃU Xòìý«ò|[Ö Ù]•#úŸîya£æøvYʰDbÑ'ÙU†2‰yã<`”(“´c°ÌÈ©]sžÌW5k¸´7²}”‡øÝå¡¿~i8ÎAY׉Iïúö·L¤Í’‚;4g6uÊ=whg·•¹1,ËJ/GEì—¢ôŒé3ܓϞ5'lg^qÁ‚p®¤¤™M³äÃp”@”a{ÛÁÀ;¿õm AŸ´’÷0MºŸ#&ŒòV2'¶éo)ŠáÒjvªÓtMÒd1ä¿K XÐȤßõïÈn«n¶üèGá'?þ±çh9&ø=š—½éS·X/v°†¸«V,?úXX¹l¹ÔÀZd ý°:0í—ªÓÖ 8Q+u¤(®ô]_ÏC2U#€ ÎÒœê¹ásŸÿÑZ´z%ºé)ž$Ÿ­UéÙzÆ€mãèÑ·n…ÁšèŒVÍ‘"Hã[·ÝÓî”Ú8®q”í3ZNÍ@ÃutL©û],¤NóO(Õ!5P_ÌÅОsÔ£y™ö+¾ƒçŸâ ûá­ô‘<Ö$zMÏË¥ŽµtÙ²ð£ÿÈæÆÞ¦í•–ô6CÐç5»yóæpP€ƒe¢5«Ö„+VXò<‡Y [(Cò¤ql.r™ìåv°ö­OÖˆ€Q*Mÿ勿꼦ÅÂþ|A{#5ßõ­Û=gHá«ÐêïqÔ(gŽ·Êió?çÎk¨×µÁñuǽµl^•5HvWËZhPÃUKr²-Aµº1 =xˆç¼D8ÈËÂ5ï û‰¦0}Ʊ¨X«a‡‰ëq‰Ð!T+O\C4|AÿíºoÔé‚;t|êO¬ú¾âUïy¯T‡‰Ò•K.MÛ%€Äß@o°bˆe ß¾ý›áÍÇ­\µ"L×÷XéM\î¬R(ëq ­j¢ô ÏDª„xîÄŽ7MâH‹3d=‚ ~EÀt¹q!½”>YÉòætW,9þt`K˜/¿újøßÿöo៸.¼[ó—³µÀÕ1a¬:¤&Y:o·£qÚ¯>söܰtÉëa‡¶bœìPÚ/•"4P-4+€¨—n l_u\(îO–®êE^èNÓ‹3šp¬’ú.éwEÁ+ÑÕ!醾,…z¬²£cÉÁht õKÊÊ55¦[\ºïËA¯ŒØ8Ã;Ú­ìin¾¯˜Êãý€Iˆmð:mjEmÑ}Ç3Œ¥_¸ÅÄÓ)‰ËCìø¢Kx?ÀˆúŽ3Rúî“NÊåɉº$Mæ÷»&âN«•ªeèg)ÂÆi9ËgT—$óöIÚ[…cê=Nâß~Û7£ X÷'ûX#rݸ.¨”-7%•–^õóJ]&àxbBýü¸ŸÁƒGæ.¤ÁHé&Pro¡T.8Y˜t•Wü¦SÃÓ·ð-Mü\«þWh‘çu\L¼¡Oz£¬<±U3o{¤ïú“?(¥õqîÀ–K"e·Sà¹oßþ¨~¤ÑÇs»­ð1Ê2DsÈÃ5¯nfÎP‘ð>—?=.ÊÔçžs¶ý:dðBõèD\’&óß'¦@9L²Iv3é*BÖM<(+ëìÓ¶ä¸<:9´„Oå¾yÛmáá‡q¾Èñ’€kL/ hÖˆÒŸ^¿!&FÍG”Ê—÷;‘{â±$$)ÈÖnôœÏ2 ˜ÒJ™ÒIϯïíráýì Š‹êºYª- •ÂcºÃ1g‰=¦ÑRRcmMmX/å{,«s0Üò¥Ë=”Gâ «e÷ñæ´@‹…¦E؇=sæÌðÞ÷½_8KCû¦N”M¸”¶zø¡n(ÿ¢Wtàš†ï´…ç#‘EÃàMz¾…Ú³6õ“é!æè~-º‘ ö:ÏuÆyÒ<-öA™½˜ Y[n¼éäï›ÈiOWÃL Fc^R ø§>Tïâê¶Â‰‚„Ë4‘}ÈJ±r˜9‡9Ο#d1¸™c˜ADTÈ@0ý4ü“=@·Ù»=šä„¾=F1¤ªÖÜ  ä¸ú ^EqŒ;ÂJa}ª³É‚!q)m¤òÒ&©óNæ H¾³§O£5ßEñ3ÙI_¥Æ2;"GV¸;¡$êÜ×’úî„…V8æ:6˜F·1Ýç™›ûR0¤¤y¿R2rÃ=yÎîóöC9°hd¥Î,rUôÙ™áVæ9`½(Õµu`ý`±7Oš¸éM¾tó#5šb³ø}jcRûÇúajw Ì5*Ë䃽êk7nÐÜ¡Ì×euXh]áXÜãçµbu^º²\1YS¤•ÌSò‡ªÚI‘ Ã$IÖùeÊ[1äÀ¹ ‰ö?à Ôè!=Ô $¦(\E003Cîvu# âVŽßü¨ˆj©¤Š:žùž©B´‹qߨDYL¡ï;æ! Ú¸?\sMLÎ)=ÈÕkVÇã´˜@¸¥´<þà$ÿHüy%Yq'Pê2YAºù€¸J€‚?/Îõ†ó¡w]2'-Ï[ñ>ÂEÊW6ÔŠ†–«í™ÇcÊâºUäµ0"½AŽð8].eŽ@Šá´êçgÏ>SP£\žwW^ÓÊ~§€ÐD’$Ž€#jé(+59CmŽfcÂ@w©NT9,&“Y‘L.¼Ÿa`! 
ÌãURyú=Ÿ9 –9¼LCp®¯Ë€Æöí;Ã^-ât\Ižüt:R"¤“L’ èA>¡4ksšfèh…QF…SÙŒ3øÃSôcprýžlò‚Ù¥²Hé G·¥è ›…wõå¾5@æÞ—FŠt†"?§3®Ôe>úH¸S§6*#HIuÌóɧôó“ú ¸àntò8×…®ø³Àø€lkBÕ”‹²ZàF§åzÉòÉ7˜·cWNÒŽ‘ªâ=qç]ƒttÓ|$s²§S8ÈçãtÞŸl*>yï5-,ÜüÒç>o¦(@ƒˆ¤Žúšù«fÀlrTF¢sÀ_y™ñ"Ò&g¶kŸ5+ݨ{àØK›Õ§ðÇRäQI¾úX0OÅÄüwÄ´(НOÓo¤Vß#0ª FÕÈ$§0kŽÚJå¹çd¥†¥ùHKY»¥ø#d@ Ožq²ä9óã®ñÑ¿ñËÔìY<ú5¶prZ#4î¼ãÛ¢#Ie’4™öp5çâ9U·ÐUËh‹ôjoڼɋ*ä—GÚÔ|5¤è…º9"e¤ÅÕu oû„¦3LW#t–‘wvªžAñÈá6ÀA/×4cºmè„B ý!ŠI˜Wh ˆ Æ3c‰a$M3nI ÇÜãO~š,¥˜?½ï ÓËc®Ää/ oˆyß:­NÓ)!w7KUËsÛzß©vÉÀ õ‘F2 ¹9lîŽÛo #d[‡–¾†‘&¢?»í£Æa‰Ã')’¨¸Ë³ÊMÇËè¤A×·ÂP›z QTk¥µ‹1šÛ*‹6À…ƒìŒ%¦1Y&‰/ ”G¢yË6kd,{„Ûe-f” Äj€$ ˜49¤—íÊÇã:Ö³fä·6M[Ú:ŒðÙ¶K囬—-EqÊ®C<æïN† ˜oóšÊm€TÄ Iƒ2æŸñKŽ\äAÓ÷Ýä­XW¨J©ã˦5ˆ'ÅÁbY]³9ÛEÙkýÉ믷ÞmJïT]é Æ|‡ ÍãØ"ùøãyÁûªÐ#¼P!A½LŽGF/’3›öîÛ«òQ—ÅÜ&p„[À{ÞÑÍ¢;Ê–XŒ»°hùGoWä¶VZtÜF4øÀvˆ‰†NÄŸ=õ’~ï·€§Ù òˆÂ#ò—Ý€?ÌålÑVµàf­rwhòžx àÓézÿPóOœÆ¸Cy™=k¦r‰ÌLÆåTp)–è)‚Jô<µ¿Ô¯ÿ²IºTßo$U-Õ/?`i¤aå ­Wg® ʯ˳ "~›Â§kjrê0ÍY§wÅ+CoéR —-[¢³w÷}÷»Å×§ð@„ÎJ;d´öÊ‚‘•ÂUH‰ì1?¢78@‹=èr"‰2Ônçë…p‰/¸Ïª×¼0RÆXØ@1rä)‘6Pf » ’4=é­Ÿý¥HLjíBau_ÊÌ£'¶3ˆÉy˜š}aI3ë^}ìIØ·gŸˆ±M,U<ëøTÈkZC IDAT£#$H‰áî;îO<õTؤ½Ü ´ßW/Lä>P‡8¾2»y*‚È©Ì#qG°,Ôú 'GY+Ïù ½ºLg$Ú,¹<çÛ—û<äïÓ·]¯)Îcé„p±nU¿º(‘á8ògÏ?^|áç9¥€LeÍwÊGŠ󵿔zÎHÓ>`ŠæsŽHhpwL¨Ñ­ ¦£nè®Å¨¶LŽ’Eûáɡٷ§´¨gDäÝÕÇ‘±7’ $0d°¶”4Y§+ ³@Øf]+è¶0ÇŽùŽ@ Q!9ÚéEÁ„šîwìÚ6ËÚÐ6ý±»¢{–ʾ=—[¶†%Zi_³zµvMò·˜×˜a—OudIiU¹ôĽÊãÂFIi3[+ïBùW*â,?û3½Í„E‡#tr‰‰¹¦ûô®ïkªp¦/â÷´C&2\ddk›©sKñÒöYa•sݪ’(ï=‡È 屘"ñ©«þÄi">Ä{µ†°dŒäG²(´eã&ÓÊá„DwÝEæHŽÿ‡ú¤ü8€4ØÓ}H«Ôëeÿu%€ÐÛ³!7{·‘,ÙYƒ~/fÿ86¤t¨MœÅÚâ):T0‰ÆVYt$ë4ÔÎëp¦pñ: A’aÌ[£7˜rZ óN )È FÇ¥+ž°›ÿeŒç·Y_Œyø=Ì÷ ¬>oÙº%ì‘zk }*]§†Jèc=¤S WYBf2çT#óH(“¤°äÌ\E@C䵸‹Ð8ª?Õž™Ððª-›|ÝÙ©Õ{}Ñ¡´4¥2FpJÌ_ 3ÄžÒɹS.úºfu­`X["ÏÔiRýa¨MƳbt‰Œ°))Ú;–7 iˆº‚ÎmO`ÇUL³K„¹âŠÿb¼îŒ•?ëŠæ6mÙ^{å%JÖÜ|@_j[cÖ‹d—hæ¢=î[èp$}K•Šüˆ¬ýõ×þÊÇoðà( Ëü%£*üYÝfÞ’¹H[‹ÊÑFwIÁP)’ùHxêD»t—Æ™î—êàLÏç ås¯ë‡Õ‡/ù7£D’Å;˜ ùLLDB„©£äbÒOÆ\|—ñPb%¾Ça©gÝú a³¤Ž À•ÎÙó$þ°˜°rù ã²\i¯ 3§Mó1O%¬î’òp5cƒŽî]<»B¢„Uq´-ݺ^ÃH¼õ“!ƒKäÂb[H¼'£Žã189áXC€3•ñFÒ€@S'”ÒHqõ€N£Û76rlºÀeó³Y»Ë;s¡®U‡Éz¥Î™Å ¨¦ºfJ#vB¨×TyÚã)Noüÿph"$0+ó<'tFP/Í2º¼4ÛĆü –IÙ!Í®)<¶Qi¿nŠÚ%[tÐ ¦ÛG‡Ñ’$md·K¨ýðÆ©÷ ®ˆ„ ø£Gxx@VMþú‘pG$˜Á¾úQ͘Ý#ÏÇ{yçÙ‡WmfýD6±\½GÃïÞŽœ…pO†ë”Žäâŋº m„EèBñI0éåïã3ž `œê@Œwt׎Ðñâ²Ð°z‰ü¨(*"×…€QZ©}¬¬À0ÖMŠÏ‘š‰ã݉ÿB hzªÃ|-ݺ}É,gjÛ •ò¥Qîëöí ïÙ¼/Œ•%€cý¥ï]Ǫ'ž=TUb¾+<>nݺµá%™({ò‰'­‰$¿žòîŽ÷GíÂJ5q•™³ù§6ˆ!1‚jÌEr^Í%éö‰A”7žüÀ=Nà¨E‹A»÷‡AÊ 0\õrIgU~ÞØ0ªŸd¥›AŸ#¡)€\añH~í’àÉm»ŽŸ Îv×#Öx`ÃÁ¤É‹ªXé0G‰¤÷F´„ Pîùª¬¨׈‹nj ëZá^åf~2JñêÌ•GŒ cÉb.hrTÚDø5hN«?ãutH£@Ëÿo57 A2¤‡F"VâFjõ÷Lý£?ù·q)8ÂG‰ D`ð9ñ^Ó ÂäÃE?Œ£¶‡|ÿ{:¼kr˜0q‚w&œ*‹Íƒeåˆ2Ât­’dï@GK ά¼“W…1Xfïý%3±ÒT;”¤6BU픨2ì( Ó3³O„i;"]¶1´oØjfN •S›ˆ^õ”…T‡ôYita?¾%ýâ<Ÿ?:ޤ&À0òb ¢ªüðOŽ»ü³W›É‰ÊCÅ| ÒyãÕÇj#¬¬Ú¡Ö¤avÝ`ÕÁȆ0Lï tzKšÄË@©œé "ÀȽ@‡o|îùUª<·IÊc£A;‡Òã¯JÚ’bú'òK05Ò"5ž¿ÿÿþÖ§:425A»×bØWR.eà T¢i:Ÿv)@òŽ”é:Ëfüø±aœþ’êOþ»tOyÝé&t Iû¸Ôpl¡:ï‹|.õA ‰Â•H¦á ˆÌsW0¼ˆK,ž !³¡›¿T]qñÊG’&µ‡Å”ÉÊR—`ÿxJÿdQ¥Œ²²ÂÈPгIR§s ˜9žý§Œr…ÉhПCÒK€9QﱈN íÈ U˜½:)’zPÚw„–×V…Aë6†ª¹3à±STqZ$Px€¨± .Y^¡ãЗÒ*>R88ÛФ DŠ“¸ä+_7¾Ï‹Ó†Ó+$yW`¼õhÐ=§Ï/yª¸"‡^0%JÙUÊ£@ówH‡Ì52ÁQ`™Ii•ÝÀ(Z‹p¬¼8ö"ý‘gΧ1=bþ¾;ùF¹vLĘh(‹Ú`K˜=͇Âë‹k¾°ÑÄK8Ú°¤ŽŽÒnê i»ýHÎgÂZ¹íc¡gܸña¬þÆh¸ÍÁ_´CwôVšÇþÄ_.a,H&ý4ˆ‰¦#ØœñÛ/¶1Z‹Ñ"ÃizLKЉñÔz<›NElúïÞ85j$ßô”®eG¸÷Þ{tÚÝ„0fÌ8ÙݦùÐâžî”—ôŹ2Îáð-:=±N{jqdÄè>¼ìê –Hz&’©þjj…B₆‰Å:e½À<¬JAb¦oÚ÷ ‡Ÿ=T5®Ó‘óBeVÏÝ¥ IÉùÑÕÒ¬$®¥CƒdCÚ}9Úˆ:ƒ1ã< r¤çTF·!?™ qéª-%aÀ±öP[‚l× Ö¥Á•ßWjÃÁ¥SCåÐÁ¦ƒ=Ú9Õ¬¹Eær™od¾ÙyÒb“Ow. 
ó z1ýdùàBRn¯6gÏýH¾¥~7\K?è‡ôñ;x°%|ý¯¾*ðçú@ºÅøÃ¨.# êŽþ:R¤Èš'z§16R(…ËäÇ#),ÚŒ@¢‰A‹î2Æ6p,H¦& ™ŸÁ >{Nùó_O?ýLh‘Éü‚ƒÞ¡ ø¶Àsôáò‘ELL-ÝD nfLBÛ¶m×µ×MXJ¯Zs\V”ÞHää¸ ãLJa:Êsÿþ½RŠCeg®Í9ÎN×›ìžgçHeð°XÏG4Dщj e”ô§@õ‚˜*…Ù¦³XØæØ¢ï‘X†ê}Û®CáðS?Õc†…Š™3CÕÈ1YådÒqë;ìtÇã¬úÃ| Ji§²¨ž=”¦4‘ê‡2ÀhźBGlÔl Z¶å‹’#a(Ë䕵aô‚Ñ¡S‡ÆQ^¢_º[—ކE‚ôŸŒê$âàDâ ©ÒvìhsÔφIúJ®'),½OW¾í.¬Z™D²Ý(ýØM[6ËÐă6Ö  1¹QZ(cš>Ìúñ]^ŠL­AݤªM~t˜óæÌÕáfãÄ lC¥ô·¢ð =Ó¸ôþ½˜0aœcçèÈÐ×"ØÑê„Øü D±xÑ®’ƒàD€Ôs’ x§p–&ôýýZéž1½É+‚5u5Ò-Ãl™˜TÌ~2zbÊ2Y‡z1\:y á”_1s†~Î3-ùNy·?Ï Å/õã!*…–Û¥—ŠÖP%ÿjýÕ*Rº“‰z­SP4 ×ÐW÷,iy#Të;äÇöíÚa²ýåÐ>V²g“†á#ÆO’j¿%LY®‘tuTÕ_5ò\˜TùR9|%³dYyŒ¥%pÔñ„É‘á=oÛ9=RW: ÊÏyÏŸ£=ΓÆ^Ô6ª>`ýù-Ë¡…1ºAYyAøîx3Ç®WG–Üñ´}>,<4Lœå!é8þµöhcé‘ÜaT)‘ïðCúEšdÅý æ.±J½Sœ¼£NbçC]ó(3u4FGçr`Ù[ÕQGÎ1ïRp’p ê0;/¢ýÍÿû·=tHa R!ü ZI„‘±2 !&‡‰ß&Š”g ”×Ã÷î¿/¬]»Flj°zûó„ŸÒ>Ñë Q££*Ř*x[EM¸dî˜P=m‚ý˜VФ‚:†ÎðÊ–fÓ¡¥º*¤“ìó æè“aZNî<^âÒ5/Á¡^öíoþ»¬ðÈ*8Ò¹òI¥’GªA0å\Üaémr&ó¬”'ÏðH‡ŠH¬%µ£†ú ,5m4A«ÚÃ%­²WÛi9ü[ë'_g¦äÌáhT&9ˆc¡ÆóÑé ŒÉÒB¥ƒÞ‰ü @¡€Vüæa(,9ê‡a)÷ü¥x8âaåšµ>)oŸöuCÀ'‹ÈÈs&N õ~V_ÉÎ@ ÓdŒc ˜ù0J¬üfås˜š5ž^5 Sÿ K3iZArL‹]¨EŽ][³ø‘Ш>·„È7*lÅ΃¡ã…Å¡õ埇Ã;·xî´]+­ž[œ1EzŒSr]†Ú¤¾)‚–â§B²À]+p¬?xÄ M»%p&ˆ ø ×Í*jÃ¥—NµSdž*阶‹> ÚîΕ›­-ÐcÖG\­âå,íFTä,­Sæ~ºD³´aÞµHvßþæðÔ“O¨Î#…bÄ‚¡q¬ŸØî”É’•÷f½Ø¦ãm½À¤Èò¹(½GÅÐÞlK‘“'‡ñ“Dcš2ð”•Òy+º9ÜN‹6Hn#C¶Ô³…oÄÈáá·÷÷4?ù9MЋE0‰™*€Vô PXÍ“a¯Rf´¿‹„Žƒé™RbÓí·ß®c\G[or°rÇ`éÉp0åš$İŶ™~¾Ê²“’‡™-çé0 ëbüýäoŸ”­À…{šC%Ü-ÇI’GuËŒÔH}uEEuxY+µË;eéZŸÑIðàbs¤®T÷øջÛCÍîuaÃä!áÕÑcCsíÐ8$DDÕÇ;Ýå>+# ÀJNü'ÿª#-a¤Vj‡ìóÊjEþS0¹T®€#SÊÓ Z¤sÕIß²#u¾©­Ò*¶vÊÕÜÂ+6ðùvƒ¥K8T’'TB‹d+P#OClÂSÔSëá¶ðçü‡š2’Qe$sûkE›EOJˆ‹‚ú¸ãýš[e˜mÈFT½¥hÐ ,´É)ìVËëü’æÉõ–Ÿ3åÝ€I*þähôÔ3§kCC}˜¨Éèysæ„E‹uð¼ÂrÉñŒ3kBq">ˆœÞ˜ ñôþ ü,aH÷Ì ž¢Ú-F¾ÿ{÷Ê:úHI~CMlµ'ç»q‰HgΚ-%ùÑaíš5ýsþb~‰·#Ô(BÈ)_’g1³¿ a“†ÖËFiâ~—TIÛœ ;¬Z‹J6 × ¯ ×HwR“¡VRöê8'ç©Ò9€ÖŠjýñ¬úÛÛÒvïÒªñÑC>ŲS T¡a/ÜP*Y±YˆàÝ-̳¡ØM|ïúÓòLü*÷Q5€¼]÷)ÏnS=TëfÈ< §5RŽîTGY)õÂwjXÊ¢Ò¯­ {4w2\-xˬ˜”¯;´V\ììtê€mH{:Ôî¹ë×-=7íe:  jÓú ê?JÝØŠÜ­Å›CS<ýu#dq|Œæ"§I?rÜ„ñš÷YàŸ·"@Ro$!šž\µ¤™ÆQ#ÂoåwÂç¾ðy«{ÀX€Œ-ÁÜñÙ`¨ ’U]ëéñÄGrþ&ÞFiR÷€&þ/¾òZ¸tù óÑÕD­¢¢RÇjˆÌáQÉÁ€¸øÝŸü{»ÎÓ¼ÑäI“¢E‹”Xdâ±ËòÆ}Ç®þH8Å@…!¸Qu÷k¸8cÐþP+ e¤¾ª1 +õõú;" Ò*xõ-Îú®åÝ +IÃ{¹é2‘äŽðo‹×…ŸmØ®oªC€31¿ÁQa(7yá9®ZkWÒŒ~HÀ…Bv‹Ts"PF°ü­>_0axøÊìñ¡ðÔ·šì´TÓ&?HØ”ß6eÁ†"W!…I×´SÖäq{ZŽ„}y™ïOÆ»RFjQmš¬3”Ï£ºè¯ƒB2Ü"´}üQï|A²†¾ø£ã&,WK–òce~ŸvÖl’ ?ôèTRK«J\ÝåÃs‘ êœðI’$'…Qš.¨R;'—:çôüV¹kà­Rb•³JŒ„úÌxI“sgÍ ¯-ÁâMtEàaÁx¦.¢–Õnƒ™üa<^ ³kð`¼€Ìýã?ý/¹‡j¸t¡ÿäòC«äן+RÚXéÉaçoû¶­:J +&p¤ .Fi‘e¢Às–²|P!¾;¡>ܲ¡YCSéaJ’¬Ò¡.ƒ¤OX)Æ© uh¼Z©a·%G@Zñx>X’M‡êxÃþ–ð÷/®;¤ÓÇËΑzHPö‚’®ÌaÒ_”HèÞ`vN¥Ü§½ÆÍ‡ $G)u#YŠñ„Ÿê¤Êg´èÏš.˜ØèŒ)wb&ÍÔ¹ ¾ì¤ÑwHòÀšf©V9Žªøâ½k±†ùw(åûl%µË©Ï¤ÑËqŒê“ºiëáïÿîoëhŽY^Ù&—Xg×+IìaU=U­Q? ³·l‘!h­Ò»ÓÉŠePå#9ßÇÛÂ3' Ži£í©žï&zMÓT¹ o¹[êê-ç`°Iu#.¿ó{¿ïs? 
$"´h!¦-J’T½qF¸Ü‹ã¿‹÷é™ïqÐ$»5þáÿ>,]º$lÖB’Là•&%@ ¶oç¹Q›=g¶vEŒó*0_‘ÇôçX?yÓ?üU„ÂÏÆ©¬<¼Ãc©Æ°65„ª¡5EI’Ã4»§© rØ©³¦ÅÃv’ÿÜ™0µ£¢ï.ßþì©EaG³†°NLy@²ÈÔ&©ݽC2¸Àg­’R|¼?h=ÅZ‘Ý.EðmúÛ¹{¯ýÐÀÌ&`èhi ¿óóeáÏ^\%ðSÚμ’d’Nˆr©á¸žU¶A¼×j¾UÇ©µ|éûÏ„ {ã±À±4oì—UgæñÆKõ¸1—tWÔ}þß?ü·ñaí«§ƒèY°ŠªjÕXj+è‘ô¶wËìÛf™îCºÌªãP,¥2¦GÎɤHÔ~FjȬýä²õ–¼}KJ’ÜqãþØðÎK/ ?úXF A Ñ_¼dA"P†¸Uy§ÞfŒû™³OÄ®ä®]{Â=ßýŽ%XpСc© E0ÿ{of×UÝù®T’5ZóP%©ͳåÙ€ ÛÐæK óBèt¦æõׯéN^:$„Énšä=ºÓ™ÈGh é$ØÁ8CÒ†´lc0Ø–-k´fk²f©4—jxÿßÚgß{êêÖ$«®$×ÞRÝsÎ>{:ëìý?kíµöÚïþ´™pp×jѳO?cë7nÏòA‰¸@A1„‡ÈÆSaàpßÁ±˜0»§6g£èÙ3R”Œ¬µY§A¨6WkE%³4æ‘9E2ñ܈ÜnßgoÚeÇ„¼ü—÷q Ñ›ËüXÙFÀä.’½Í ¼Ìz p¤Oțϳ¯´÷ȬçÿYÞ$®RåÑ>±ÿ€'½KœhMÇp¹Z!‘ø·¿ó¤½ Nôb†QRz]-“™ 3/¸X>œê ¢K›ýý7Ùdj%NÚ9nù!`_´×z2™ö ã8‘ýãqH•Ý»ö8§ gCñ,¼súB>¸—q¾³]Ô;ûØ«ó)‡ÞùI:!ÁjM°OÒ’«ßüÈ¿µ—e\Ϊ„0©ŽTÒ‰£8Eêñy!æ»8:Ð(1€’cà°èÃÜyQ;Þ¼q£›XŒ“Љ,õðÄÐ_ÎÃE~Ígª€ &M“ôåß»g— Ëul8Çâ ˆGšéB|–¾-”ðÔ)‰ÔuWÙ/ŒR[Uâ‹ÙˆÕµÌù™ýh_«=´q§„sÔsÇU.¢‚î ôz‚XØ­túX’ H®™Oc1bv2>Gv üî–öèÎ×ísfÙ¯.œé³€¼7DW·Uå½Õ°ÿíØÏ^ÛÏ~QÎGJB™¬¹Ïææ&?ýy§¼Ç|ºjM7ð‰9¦ÄSO>áë§«˜¦PŸm—ë5-vt…Ü$kús¸Ç4ý°[þ|vx¡eŸ©ôìE ÚLq8´`›<¥œ¡ šC$½çh@U©“1ïR/[°ßú­ß¶{î½Gã9 èx€ðVèÀWÌOÂÍRêõ’³Ÿi¹+±/}ùHëÚn9gGI(,Yæ‡Î У ]´l©½°j•ï»= ŽLÏZYxœB)Ž;@z*o›ÎJGŽâRF—=uì´½*¿‹wMgS57yºµÍÈçÕÖS¶vßa;ÙÆÚa…Œ\à ױlWÔx}*S·-DÂj™ qÁÈ®“'ñœ-‘º/’êÊ…SŒ¿^·ÅîÛ¸Ýæh#¸eÇYÓ8íÁ>ö”m<±Ó¾¹úUku''år_x@7JsÝ“'OqNÒMsúQ\ IŽMÁ -BøÄ|Üæ´ÌÑÇ┯{Ç™/bñY™^ CäÖy—¦  /ôbmö$Ó‘æ}UÏ;^¨h34šš›mš È™SÍbþ¼¯òÞŒ÷‡$H¢Œêll „PŸg‘ÁNÙuƒT‰ÀàX«k¸¦/}é¯ìw~ç?ù¦ñˆ7L¾3A^:XzëpîÌUy$H.]¶ÜfÍj´uë×ù†P¥xG9c™A›]•€Ç@¤# íLáÆöj~ìk[ƒ™L¬ÃI¤?R8™½2N(Ÿüá¾ç‰Gàž°ÑÏEß+ZÚÜðá!þ¢ûMsðW‰€a?Ëù0/cî¸k—ë9LŒú 4<ÿÉ“§í³÷~FÀUï›vy¿ÐÇ‹>̈*l‹ùý¥ÓƒÚkiçkþ¡ ï¶ûÓ’âÉ IDAT's¯ÇIYƒ&»¹©I ÙäÞ«X]Cp‰%'™dY‡ÜaHN:Ðéè0àóz£eÃ8IëSï÷ÿÀm]¬Öðu Tº8È @@7•ÓbATùH1C¶‹–.öwR-'åÏ™ÿ(F0b뇿ÿÆ7eútTôaõ‚2úyÐÔWk]xÀ’{ç$M–¢†m"Xz˜:ýK*-ýÜÄëziàghmvcS£Ì~fºG!@=…î’ ;e$×øfœ8i²ýÊoü¦:úä1AvdŒ;è•ôDuP –PFh P†8~ýL?ϯzÉî×ÒÅ-[¶ØA¹ù?«¹§¨üɪôÁ‘?/m;÷èØMsçH3ÙäN/|¤„êÈõã@UºXPøB† <¢¥NâãvǬQ!]ÏØN?ÆÝ"Ë\ÄzEqç$ŵ—á†Ê伬¢PØàsq¦l ±Áí l@ÀOC°GÎw¾õ-í²ø¢ÑVV0áÀoú ØF¢á&… ïýš‡DYãk·ã‹êe0UZ°`LÍê7Í6AÊšÁò¤ßæ\ÖI†$Hæ¹1Þbà3F“ü¬‡þô=ÿÙ·|ˆXÂÑÏ‹?Š  âšSÅ{ÿÔO•&Ò ÎLeÇ‚ÆÜï„O¥ô?~î§öw÷ßo›7mÖæQlîuÊÎ=¨]±½å’tüÁ 7jNiªŒ˜ÛtM{°¥ a¨Ó›¯Ó —<²Š =Ÿ~@†|¤Ïç ±Ù3Ç‹x ī޹Bi[P䈋ÌÀ£÷Ì—×Ý1ZîÈFY}6,.KƒMÛþ顇ì§ÏýÄ·bà~X‹O/“„":Žz«aBïƒi‰C‡´Ù˜æ!ãö±<?/m½bÙ2›-1›>Ó03ìËTš.] 
ôFË7-€%¾ú\ ڤɭ©i–½ãÖÛüv– ³,âåÂeÍOjp£q¦SÃ=”áʆ¬'Ÿú¡}ëÛß²7I|Ÿa06§ÀËæW¤‹c*‡´+W^çšúšLdÔy³ã<•ÛÙõPئpÌDkš—5±[®²8X6²[¶âE®àÂ)ôRþWLyÅœ1—7E[®.^¶¤Ï6gä•V_ÞÞeHÿè#ØO<¡üÓ¼ï@t€2ö%M9–¤Lñ –ïÕ’Ã-Û¶É-ßq} C_Žb4 ÈŸ—6h†LÆpÚÜ¢<ÌENð½´ëº‰Ò¨ÒgY<ŸçuÈ Q^¢GtÌ'è~Ó-.ÔŸ$-ìV~·¤—íEµgÍÔ¦Yê;}áŸ÷“³ê/?ü°}ÿûßÓF[ÓC6½G§=?‘ëä½!‚Ãeã¸b›ÆqL”Ë7”ã†,HòÒé¬z!DtÀ†¹ kï½÷?kÉ¢&²}äªÓª2†óžÙãB¬‹¸*CßM!=çñ< ˆƒÃƒë|ìÛ_ÿ¯ÿeëå‹r¿¶8©y*Œ ó—6GÐŒâ7mŽçË–/“ƒ‚ªP ô2çX:ØÎ®²ÅÒû8R`VhiÙçl±É}zù܆‹d£¬Yò߯ ß9­”yà¾ûì™§ž” NC¯É¬÷í†øzwþOôàݲä²·Ê?ä.m_Œ=$Âü»ÉŸ—kšñk®YîãsÄEÖ«ŸŒ• P,¿\žçߪ¡K:^ Qm±›(_€ ³ìsŸý/¾ ¼Ø cØQÞ,¯½—®Œ’<1]<2|â( ”ôÌd_ýÊ—mݺuöúëû´VYÎk‚105Mâ#@’n銕n€Ì>̯‡*Ö-ŽÜoT%M µîÇ41îüˆÂóOÊÜ­<]þe7 ô=¿´Ë2ú²7õtä’åK °žË;:®¥–+°µk_ÑŠ/9ÁPâTÙÓCýÁEò/¾_Ö¸ïÚµÓ6oÙìîéHÓß@×ÊxfÃ,WØ´Ìi‘¹ÒßÑ3Ï ô·¼¡”®ˆC驳gl"WFçI¸A¶ƒ¥Uó´á2 ê6xéÄBÅ0ß s (³q§â(™[*pRJîl¢ødÁç2uýÒË«í ñç¶A@¹g·ÖA·séòÇØ~Ž „êªv›—úúŒÓ‰yI$€OÏG(ƒaÏØ+;þÊFz–î?ÅGŠ]¼ŸîfEÞ"ÕïNbÄå1r„æô&ÚL)Aêµ^»¯Ã%!¾#>ÂØ2~é/ÿÂöïß/ÏP¸0þWÒäñ,”ô)\Æí”’fý«¯ºÒÆÊc¢ìØÛ`ž!ßx½Z¸p‘ƒä4‰÷¬ø"Äî@@7«rHz£ë @ì ñkÍC&xæ9z¬¾¼ ök¿ö뮹t‚d@áó‰9 Ñ­Ñt#6{1¸£ãá72ˆÔˆÕcÈ\vKÒ”·nÛføÿþ‘½² ÜãÊ—aœŸ;« h:ÔÕYcoyÛ->¯Šn*Ãô‡6ñçí„Cá$²û¤ébÆóntK.üÁ¼" Ù²r -p]6”¶©l¢Ë+rœ–µ"j7Ξ™5Œ ¹çß}ŒwÇ6°ÿßç>2H¿Î1Ì@«Åéòc …ÅÃŽ×vØÙÔb*Ë/¥DQÞè~‡yÈó"öüùsµdRb¶œëF¦ ¦Žc!^§c ÀÉrÀDcÈ|%[v6hÜsï½6\F¼qx†åBO `’"G6p|õ™tÇa¯ãHÆAF`ÀD®X>†ÁûÄmüÑ}N@¹Fë²wY«ŒŒÏj°EFnŒ§ãËÑj3Ḛ̈ÙCì°åxy^“3ì*û9u.\¹zÊ  ÜêûúÕ€‘›;'¨‘=#ƒ‹@š\¬Ò@A<·ø²‘'˜Ï|æ3¶êÅUöڎ׬U›Š‘èÅÀcI%GÒE‘.–·òÚk%v‹»Ñ ëÖÖ\³sÕÇla°–½QLR8ÓsÄyÝ쑼Ý~Ÿ2ôçÏ/ŠŸ´»Ç“¤í± Ü`›É2ûñ­´¤÷Ê{àãÇÇ’§Æ•ï‡ìSrš;£¾¡ðž€@¸D=xëþžxrÌÈà&«õžÏØÖíÛlýú Á³²Ü«é‰ƒ¤×®¼ÆÛ7Á|kžÓlãµ²Œ©¤úOš{úŸ|¥¤¿ÒÑõ‡ï¾áúc~±]k£×ãɼ\oÍÈCÖ|ð¢ôà Âè['…üÄùŸ208¼Rýøž¸ÁzöG?@×úÆ_#¤q6LÞÂuòÈÆ€0Ù¬Œ:U¶p¨ï•óU”?±dÒ–mzÙÈüSäÎ)„?…ìP¼™•@”Ò¡˜Hùò:çÃÀšmߦAÇ+!L–‰Ø|‰±o¿í6›ÕØèï€vól¬ŠácöãÿľðçbMZÙ‚-†g.AèÛU$cF)?(·hÁ~Ù¬ÇFwD{%Ï>”Þ¿@Ù-MM®Ä[¶d©-[¾Ü—Ž–’‰Ò¿RèHö@§^€wfuÇiÉâۮλ[NòÝ,Î v‹ÌÊŽéÀú&ÆÁ‘#I¬+r–€%i3ø[T¼¤õòlþÚöV/ÀDü'/í+”¡‰,(áä‰ã>Ÿ¹[ÞÐßTÃÃï…Õà3ÕÛ)ex9…C1uŽDô8çXLÎru@RÞmðä~¹Œ»§K²dÑûÅ~ÐWh±¼Gêì’ã ™ê|ýoÿÖžýá“NØÑVó§ŸÐx¯¹ 7èD8ï“r:¼iËf[«q±säÍå*ÊÛŸ2uŠ-]²Ä–耜¥)#¸Hú]š{,O·žbÃhêéîއ+#4ÁâØFɦlvS£ý»÷Q›­ÉoŸ—Œ {qnà{”¡?ŸÓ`/dŸ¯Þø€Q~.d=Ï£6pŒ  ׸FœÅ§?õ){饗m‡ó„æ©:,±½pž1¼õíï§3ßw¼‹àæÍ£­þSöqäãsfÅÇÇ å¾‘Ö^@ækïâù;—×9¶‘Ó5 ƒQöp¹0ƒþˆÌçÄÅ8tØîÑ;B“ÆÑ¤¿à Œ6ž*|ÀÔGd;‰³aœž¬“ˆ @†ôÅgïÏ€eŸì¥‹—øž8óÄéÎÔ†dx'*g>V,9õDÄI–¡ ÀH'&QVGÉF‘‰p8ʹsæÙOdÓxƒo¥sðñçÿp¯[! 
ÿ¨Ã¿ê~ 9Hê8cô”EÏ<ó´¼íÇ0UÓìy6¨¯ÅCŒÚTÊÞ/qNr¿Vg¸8_@7OÖûOÏK˜•žä¼»=Gô@(ç$0gá$%n÷¬çr+|‰b±ÜŒÝ~û;m†8{ÞÓ±c'ÜIÅýó7¾ ‚¸x‡´M?až’o¹É“ÝÒTC‡‘©×&™ø¼²v­7YÎ]\9ºÐSc<^Ư_¹Rý³Å–ÈÏèBí¨ÉÖ Ä§paH YŽnêqt`L0ÜÑ€-J8€aÚ‡ª#%~Ï;ßž”{}ö #¡{atÜ0 rñŠŒ6–˜æ|ü9ªNòàènŸHoÅ( à‡ø¶SÛ5<§¹JwÜ«,lÿY ×Âü©\j!þMÑÜäF‰m;äN+ &jè#ÄF«.ýïT/ÿàø"×Xš¤{†ìªjIÀd'DÙ‚.}dÖøCó›n¸É~îýïó§Ïÿág݆'Îþqñp_ç4ÒO°~m™‹=¨Ï6lب9ÈõNØìQÏ{9ðNé-úÅõRصˆ»].m;â6b7œ%}¹·r¨/‚v(-ýF $Œ”Ècg)t*@LfêØ5Rš`4\ûP/\¸Ø~(LÊû@È—£s°Á†à%7PçsU*‡ª„ ¥ÉÜ"u¢W.ÿCœsš˜˜IO=õ”½òÊj47*½î`0uRð`0ŒåA¹ÔÂÙo€aJ£AÔ˜ <°?4e—܈"ç®ry{:ígB@’MÁÎ$ñps9‡‰Úes¾\{ݵZl°Àžxüöm\¯Õ,ÐÕlD õ\NKŽþ™ÊúDö€ì‰©×†lƒ¸HßV³ŸòR }åFdSS³›ü,@² sê'Moá¼÷Ý[â!v/ä½ð.ßzåɈº¾*çégŸqñ(öÅ<68y‡Ö!(z4p¹†È!’ÂÐ2ãBÈ£¿Ô°»$±nà¨õ‰'~`­Úe¼übÖjT`ê÷;NœÄÖmÛµbc‡ê£”Ì\ùÉFQÙÁ¤<cÞ>ÅjúLJ·¯„f¸Ó[&'Çïùw»2ä§?yÎÛ¡7ïÂ’–“RÜ㜮bÅÚaâÃ:ì5k×H“½Ãç«I“ŸûC<údSc£ÚµÌ ™ ek[$ŠÞx) ÎÝi L?Q_ëé¶|ù ûÌ'>íó‚åÀãƒDÅA)$£#ø–Ôðx”†A…±9{–´É¯ œ ¯ùVÇÇ@œaF êe^‘„ÀÁÊü~Ø-ð{ÿçûR|ÒV¿²FËeª¤f‹»hœÝ(£çé.š1xãÀõ£§¢ÚŽYã¼}ˆÕ1:»[þ@¢øW>Eϱ<îÆçí9᥽3jôH¹4›"ÛÃFklj²íÛ¶É3øŽðND<èÉG¯@1¿Ö•îžq ^¼ÂûŒñ|Üx÷̵îڻמñE[¥=Ø9ÒÍÄptŒÕ‘sŽñÚ#ýà»Nï´±±ÉEÿ¥KÈÙ6YÖy€,æHgJ’J9åÃþ ­!òjywnjœeËÔY?õÉOk³&eÖã‹GeŠ£€Ð€bð‘H–ÒnãÈठw1ÍARi5Am!¹WÉ_Yö>ao”ª|»Ñ¯j [ël³¹óZl¦¸ hû òvÿeYûsPÖ <{™àmd.´Ü2e^̨«³uÚìL ½Ÿ“ ØQqí(ihrE^qx@öÈÖ•¿ÏVÙ<¾ºy“ýìÅÜ“ÏÙ3ì+.^1÷¬ F®Ke¼ŽG¤†ë%b755¹’f‘L‘š[æÚ$íSã–ó¡SYç½D’P‰*ºÎ'ÛDÙÎa²\bϽŸ¹×&h^ˆ8HôŸÀSwH…øŽlž²MéÔ‰“ÎUÂÅDŽ$€s!‡—æÜ%e©lÀ´ºq½Ë^кo«®³wÜö›«ÍŸÆÈ(Þ2mÑâ¾!NbDÉ‘ûñ¯äV—Ù£óøþW&¡ÓCåÒžË1@¯ñ㯖W¨™¶òÚëœÖ¯‹+ €(zgÿh{|ÿØI*x]æA(g^Xõ¢+jØ&ο4ĘxŒ È5]…ãUW°o¸ALÐb¯0´Ølæ5fìhÿXשŸ¤pq)ßÃÅ-õM^ZäÚxÌ8ß7 H—#ÓÆÆF[¢mE?®}¼Ù¨>@Pàƒ¨Ë RÈFÓy¸ x¸D ØÛ4uVZPö½!¹s•1p¾’nÖÓfеÁ˃ËT ÞcÇ[íOþëç}ûÙùÈœ&í,S¤s@%¡.¼Mþ£ëÒà‰K#ûqÝSy1«Ê…³u$®¯ô1_…£G²©z—¼Û¹óæiyè6Ûºu«¿Œ÷(àïOÇÅ=nWºç_xÞÅë£ÇZÝÙIѾ!‚%éÈ3Fó¯[y­Í×|èJ‰ÚË4½C{&LšTð°ßS™ôÙ.Œé³stóùÁòEÑ»Fâöžìc³¯|YžÆ5Á@®2dÀžr`õÁ¤Î ·rZ@‰Ñ1ö™ –ê.!£”eÀf4ãp¤Ì öánpÛm·Ú+rê»Y j0«½ *Äüxž¼çf·Kt³ò圗PEûf7ú‘¥´ˆJ^_q‘ .ôj7®ß¨#æ%0%¦< ßé3§}5*vÈQî1yr ¢µSØóôgŠ€p‰Äê9--®$œ§vÔkê¿ñCÝ[™½õÙÞò¥{…¡•Hq1)PŽ9Ê÷šákÿï?úìz(?+IvÝ¢p(JÖð¢ùÆùÁa̓áb˜là0BÎ<$Ê/N×” OÃ2E`ðU*ÿó÷¿/Nw¼8¡ùÖÔØ¨ý­³:c“B¦ÐÎc|·6õ~áó¥4ÂR&-efœcéݘ%Kï_ªëáÃëlŠlOçÊãÍo{«¼*ñ½Ò¡2Š2è žÕû9¨åŸôAüé /Øj-/ăOØ‹¦gbö&ÎéûhõòW¹\N*.\ ܵÚ&b¹{B/ç8÷RÑèÍ\oâ$áíb—Çw1ƒ¹A‚|äßÊ®òïíÑÇwÐ+T›­§aäñ€% &²MJsç°«ltnp›¾z'r‘”âp”ž¹Ìjq¶ÿôÿhwÞu—ÝüÖ[ìU™ lÛ¶ÕªÔV{Ãí©…÷pÒ²eevãUL7®3+–çð)‰ mGÍ{#ÑãÆó(™:!üäGÏډǮ|L{Μ:m{dÎÃþ3¯íÜé列{3@/Ï@S^© ì=„šJi–sŠóø‚ŒÄçJÔ®o¨·á#†“5… P ä ™PÂQ¢$AãŒÿǺ_þ°-—]ÛŸþéŸJë|®[Í`8±!ð'‘! 
"1¿Ì}uu ô4ÒNvžörp^€îAñ>âp¨—ù28|b®góøcÙwÞé®ü7mÛf;””¥"U?-`è+‹¨c Щ[(¹ìvï]À»m¤¶Š½þ¦›¼û¥ˆá=¹bFFá‡òí¶¿¶Óˬ÷v¥Š¸EÀ° ½À’ø{Ì¿°lH½@ʶ¹òæ4oÞmP7Éwó,É’.‘ $¸6ðb2hâ*ÅíM•ÈT+ D;ùñ?ø„ýå_þ¹ö×Ö€ÊSHšÏÃýxî:»Õv.¬yÆðH)ð–MÀÞ²J6œ@b÷÷´Ï3 yí 7j­ð&Û©Õ"ŽMÌ_4ôj<{!WgoÅå’Uô—hh´çÌcÓ´Òfë–-N³S2Ï:¦% líº]Ë=÷î}ݧDÎr¡|l˜ï*½Ç>Þ+Ô»pÑBä"klžm㯟²q9.ä Ø÷´@¹±¹8L…XG=Qua?‰(þ¿÷q{àoتկøœ"Í(Bañ,r”¥ÍtÞÒGW‡u±«<+g xÃ,GHcB`Dqã"·DÿW­²§Ÿ|ÒÞrÛÛe:Ò$ môAÜÏÄîR¯´òžÐ,kö@Áñ¼âõðç+½Qék=çD­^iÔJ¥%2ëÂõ%‚ïÙ÷º\Ðís¥Ì®={ýƒT*ZGα·&—¦áM×Çt¡8ÇYÚ}q‘Múì¦Fm[;V.ú’ˆÝ=ë^ÉA ,Ë qs.|;"z”Ãä™eÊq”xhÙ  ž¾øÅ/fâ7(ˆ_®"\ÆëxÔ-¿ëiôÃr·ªªN)N¹¿I¼ÿÀU"Šcc X2™'ýþ÷ÿÙA3’Í›6i/ç]Êç•N¥¡\\iÚ0@α´ˆ î ™ åÁ.q€~øëljÒª­Ïß©¹ÆøÎw ³ž=²‘d JµrÏ]ÊÆG‰À1qÄ멪/ìj¸ÀZæÌÑõ'¯CHh¨©/iª#Õ*sL 9tF̆æa¤íÈã|ìØQËF%Žo´8„OHüþ‚öb>tøp¡5¥ÅîÒø!ð]`›ŽU«'µ¯Íi)漢‰~À™mnŸþg¶nÍjÙr.³Ÿ=ÿ¼/_Ü«ý½}å…ä9ɾRÉËDáA®à“ñWãñ&iµÙOûËr‡ö¬”6í²ì ûz\À³ GŽ”x-ÿˆ×ó¥Áž7w®5ÊX|üøqšk«ô"²@”Ð@âC4~¤=CúyÃH®ÒÞ0 Ï/ b _þaMuv.ÓDîQ£FÛh 6gZ(¿„G5Gɦõ¾ûüb{Éc—Âíàq¦ÍgƒsœÆž={Æn}ûÛU¦œ÷jý1skay¤¢h3M-\ûí¢ñê÷.28Òf¶¶ÀŒÆ•ZÔs 4jyã7Ú?ôKjIµ}ô£§vªÌ7 Ð+mn¹¸˜'¦e:¤AK¯½f¥¸GÙ?ÊÍ{ÒÀINТ> ¤ ËcîÞL ¤pq)8É‹K쥮 ™$ešï«FŒ‡9Î6Iüýúý_w®PÉ IDAT°Œh^ÅÕSX^ÚÅ%ód”;à‰ág5o‰÷ò1£ÇØc2EúÀ~ÉVh€®zþEß™oÿþ}bE» Š>H³Jcù€{jK_ñb­CJ®ý¿?d_OÚWéçþq‘3´2i欰Öý7í_ÛQ­ Éàj•ã»S1Ÿ+p’£õ–jUÖtiÍ›Ƙö467ʼ§ÁP!qúâ»—œ®ƒ $ƒªý,3¬”¨ÑœÓÕV'qx䨑râ:^À9Ù{ô{F[3 )…RÐŒ\dÆ´:XF@}º$bs;JL“àÔð@óÇüß4úWÆžÌëׯ³}RHÎ˧RNbYK¬ŽË$©Îÿbe^ó¥ùÁ0Ÿ6 ¬›o¾Å9û—åŸ3=‹1½b1•, 4åÑ$ÛÇææWᙩ¹¥É÷äž(#ê4-’d>_:¿tH yéh_¨™A1fŒöÒÖq¬”9ˆZhU¯¿î{à›ß‰É΂œLyÐS°yl‰N¡ò8É~lÇ.²Êž–{¿öVy«öfÙä)$Žè[LÊ$øºj¹Ê"ÀqïÒ¡1{˜˜o¤Ì‹•‡µ3gÎr±wò”©öeyQÚ+ÛÈ|ˆôÎÓ>žOË9J³ RÀ N×ký<ÚkÄêYLœ!³ Jµ¥”»ô× $/Á;Èk(áìGiÒx_åÛB Ó–ˆÞ“´^—œ¶hÇ<`sŠÒfGPC1ÜWÃà`×eÇå4ã?~ôÿ¶ø¦‹|õÓëíð@P èåsª:gUÎ ±LG ËŠÌ—çpßh]šŸe•ìOÏÈ›n¹Ef?UnªUîÃ@‘ÖëÓ¡¯HF²óªF1ƒ ½©ÜYß––fq-®¹f«W”jlëûÁy¤ˆKJ’—€üÌ3E³ F(Ù•‘A5\ÛÖbÆ3Q\å4™…L™2Mk¯¶çeçÈrÄòƒ³Ü@ŽéòÇ|ž—_‘¦ê|çwÚ–M›m·ö\9|è@’‚³ Á"6c3ó…öq^Ê9æ“{ijÔÀKÍ—rqÎÙ1»¹±ÉäÄö¯Ý6nÞR¶ð@™æè. ¥¡V ºÓ§Ù­–™©2›±e› 9AÓ*س"~óØÒúÒõàP äàе×Rã’Ř(’ÂØÕ%ƒê«%ž1p–Á<å”É“ím·ÞfßøÆ7´—ö_nàEÐ냌uqŒÀÇùñã'ìÿô»OÜ䜹-¶fM½$uØ*ÅBYoàHýx† dHÏỷ©ï¨vLœ0Ñfk˜r$Èýí}_ ØC­ñÃßÉØ¹ÅÏ’E‹ý‡Ñ–æf›¥ã4‹c;\ö«|,yç„ü‡Ò#zøIŠœ3ˆÑ $‘¸=A±ô~~07ÅbûüŽ=ʵÒS–S§N“sŠmöÐCÿàFÍx !äjiÙ½]¿¸zµØÛïx§m~nr—¸I‰Ý*w  ëé/8Æô#0Æcþ^%Îñ9]mæ —,_f?”À+ëÖ—¾ˆà?L|xxWËuÙ’ÅK|›‡™3¬±±I›†Í²é õâ'¸tÀ» ñÇ>Ápô<_‰gNuôM’}Óè’¥`6ÇŽ“\þ)á,q²Êòµí«¼s×köÐ?üØý>(S”ùž„›<³”ÃŒb9ð¤­OÑÒ‰c+ 1¹³rJÓ æ5MBY†A÷ùj´þâÏÿÌW2•«7‚#÷ à(Î3ˆ3%Rc>4M8wJ¡÷Æœs ãu<&€Œ”¸|Ž $/ŸwQ¶%ùÁÄCŒœq†gši3¦Yý®é>°wÈÍÃÿ“s–mçXbB~0Óþ¬ÒÜ$«‚Þùîwkn³ŒËá&‰› ¶–Yq:”ƒGÊq튊geÀ±xóÒžÐ<06‹xܹù­oÕRÍÝö²¼&Eú•ûØà©MøbÀQbz}ƒì*²ÌiN“‘ø„‰û¢5OØ_±úÒR#Õž§@É<5®€s¸Ö€3ðà*ñ 3Cî´Ø’¡QŠÖa?òèÿÑ:ã]¾‘XO }ä,1”þµÈç&á&×¾7yHéÙ.8:JcùO•½í ¼§Iâ"·–9MþømiýÏÈÃx|þ–Ðk„¬°g] çp‰3ˆx ¯o˜îïíø(‹_•Í;æ››ÿèåãÓùåK’—ï»)۲ȉ–£äCÀDÀÀ<".Ƶ¨MÚFv·mÛ±Ý~øôí°–;âS2â —"`¾¨mN w¼ûmï°UÊ¡ÝZ*y¨ \¹À.xÉ‹Ž^¡~˜ÅûzG—ÚYa€Äcbñ¢…‹¤å¿Ë›´f½¶ÛÈ} ð+ɼ0ëªñR>I8Ÿ¨×”Ç4ÍOW?Qœ#KM݉‰æSxsP äöK9À Z“†O¶Ñ 'O²ú#õÖØ¤¥nò¸LÆËxÌ~ìñGÝçá)­ãv®-{n8£h¾ÂFUÿꃴû¾y¿Ûò͘Q/Ÿ—p“!Dp”Izˆè Ìzg>cq ©_ À1Óí纃y1ñàM?ÁêgÈì§©Qíh³ð—­õøq)ªl¸¦7ÐTÏ“ÏqíÓ¦Mu±|²rªÎ'k~xœâùP±>?ƒÞ{ºT%'¼T”¿Hõ2á*á0Q ‚³jg¼æÈX{|ìè-7<  ¤šuܧ¿ýö´¸Ë}øæb0K¸Jæ&;»jíÎ÷¼Ç½íÉq“¾±˜Ò€eqP‘¤`©€<£¥’g´….N.*Fè3]+`Ø?æ®»ïöÆV9"ÆÁî|‰Ó¬£fnq²®§Ë^ušì9gšcÌX‰Ô2Ïbž¸·P0íê-QºwÙR äeûjznXt‡Eïh˜mîðÉÀf£(Äð)fÍWxØ7ªZ @`êZ]ó“ç~ìKîp«²qÕ¯Ènòþo>hsšmÄIòyÙ2ž¹ˆ ÷Üv¿Ê*ppRþ6ÈsÉg÷*ÙöTWå(@²r´”šâX~ÀF[»|€P'®ÎrRÛ9ä=i'´=íqíÓrìØQæ;¦¿ë¯»^Üä{ø»ßµ»þçm¾4ÝëÖjM·ì&µï¢Dr™æÞbˆ Ù+YŽäÅg&‰G¢3gÅEŠ›¬t@£í®ÊÄEÖ iÛ´F~nK“‰+3ü|öGœ®t»S}•¥@ÉÊÒû²¨ÍS  —Ô6¹Í·|@Ü=uò„µJysLœ¦äkoëïy·mÙ¶Õö½¾ÇEôŽLŽsypŒq…‡,G.Ã(ŠšN£æ"ÅAF‡À…|8a/m4Ó,¼ó=w{Ðc¢´ÖÌíæçó\zš–ª¸Ì(@ò2{!•nNLêmoŸ yÁsîÉüŒ<—Ô*œÏÓöëeX}@"wPÍ0u0#¹‰ÉP1Žñ4*ˆH‡6“$Dl¸ÈööÊ*khmœoÄáÄp9"&0÷X:˜ÒI3¤Hé×ßýáÝù»J\fgÍD—ªQØÜ~Ç»d7¹É<ûöæ&,ƒ™ø1WlÑdsÌ}¢˜]y€ÄÈ›%K´]ÂϽï}|dԤɄ2 –ü”Eî±Òé¢@R» ¡—Ý×£Fí¸2[Ž(©[™ˆgÿV• ØÀ“FkÔýü9u€•† b¸"šH7oC›-1»§}ªC†‹ÿ œO–-)«kÍ›ïó¢þ¼€·ÄÿJ)@²”"Cø:ÏIU ÝÔHGïfi|—.]"mp½8Ìš åÎ8Çèæ Ò±Eép†º 
"¶VÔ´·µ[›öGaSi›HÚdz´ö5Ú"öö;ïp 5ÆôpŒi-(”B)H–R$]»A5dpS#i¡£È¹BÀ‚ Qµ¥ ësB”Ì3ŽÝ8G®å\¥æ Ûµ¢æì¹6;­¿sÌCfy)¦ùS\Íá„b¡<ýpM€Sfî1ZT¢-©Ž+‡ $¯œwuQ[Z­U*‘XøDÀÈÉŠk®±ùZžÇJVúÀ5†?„òÈ=†’"÷ˆDæ Ïeéöâ&+-fÓª±ãÆÈ¡îl[´x±½Eûúà"kå$ι·ò4ë-mº÷æ¢@ï=ãÍõ¬éirȋ֑SÌÝ.œ–¬“Îò¦›nômj4o‰Ó‡Rv0pŽ{Ħ‘š9ȳímn47*©À 8Y+fšššíºk¯uÀóvŸ ØmúQ\e¹ò4+—&Ž9)@òÍù^/êS¡ñ( ‹—.“ÏÅ…¾¡•P£€‘ްe:Á£9íR„`ÞÃü#Jšd»À(ˆåµ‰}ÆrÂYr%7þ<[&ŽÀ‹àOæø¡H@Ø')‡\‚’Cî•ì#8æòwÜn‹-’ƒ‡qG‰Hí“*.±;Èsúƒ‹tï>:Gdí’y¥C­ì ™‹œÓ<Çn¹ùæ@òL„øŒ•nWªïÊ @É+ã=U¼•qþ ‰s–(g̘jóçÎõ-$Ü$H­ó¹É áÌEvˆ£ìl—r$ÓyWúaðÿØÒÜ¢=¯Û\)lx¶´%žÇç¬tûR}—7H^Þïç’´‰b'爢‘ÛP:;«íýÚgÅŠ•ò04Õjµ-ª‹×JëŽsŠœ»¸ ©xÄl×’Tø‰FÉ䇽®‘ˆý ò• ÓŸ->SlR¹ãu:& @’©œG"ÞA\9Sä¶bà‰ÝäL˜Wa¤ØU¡¬AÜætüù\åy5 nD­ÚU/MÓ Ôé–{Ð/X93C‰cŽ[Þz‹}àCr1»NJ&Bþ™âs¦ŠS Ä¥(@2õ^)½ ÅÆ%|–¿÷½wÛ)¹Wcy¢þÛ>yò8šd?œJšüÔÕ @ÎÐv-vë­·ÚûÞÿ~o¶o+ñ›Pö™üNï÷²$é0Ä(P¥L¿N!Q`@@L\bìº5kíÿáÛ¶vÝ:mk»Ó=• ´ÞŽœ*}à‰i ûѰ›aƒ@ò†[n¶Ÿ—‡Ÿ«å@7òíÎǧóDž(@²'ʤøQ­ñ–Í[ì{ÿô][µj•m—èÇóýÚ»{0— «©•óàqÖ0³A&IÓÅAα•׬°;ï~oÞº?KȽҔ8£@ÉÔ.˜ù¹=¨ªºÆNž:!Žò!mù°F@¹ÃvïÞížÎ?a¥ÛÙ^hňú#äYœ]!¤¹ž®Íºhgæ¦Fq·¸")–ÛˆR†9Ç”‘2éØ_ $ì/¥Rºnˆà“xν5¯¬¶U/¼h[·lµ×vî´#GÛQq–§´¯Î©S§ì„þH-eo¡ZŽ4ð:T'[ÌaÆ1£FÙxí“ ÷X?½ÞZ´Ï̆{×»ßíÀYS]ëÎ9bûbÙ$ãu:& ô— $ûK©”®,"0Æ›\ÃQªcyÔÖ­[ló¦Í¶ëµ×l·¼š¿þú>í~ÔEqÖuãˆIK€Ó•=± å­]Ùw¸<¥ãr̘±6A›w±ÍëTm+‘ø%Ë KÁ±´}±øtLè/Hö—R)Ýy(åÎ" aöƒ‡æ) pNžêN¦ï²Æ–¦nëª#øÅ2áê*ù¦ÔIuu÷ùÆP.(]´ró}½…ÿÈúK’ý¥TJwÑ(àœ¦VçtÔÈÞ’ ¸ªjëðº:ÙFA>,;¥høÚÎZMÝð‹Vw*(Q` (~fš3¥O¸@ °õƒpѪ;‡9 ®ÿõ÷ÙÙ=‡­j˜8<‰æðy]Js¦Í–<ò¤®à@cWÍŸ+:…DA¦@ìyƒ\M*>Q H”:rVfm:ŽVfm[[}nÒQRIY½3îÖ&yªµÚ.9ÊÀ‹‹ÊÅrÒY¢@%(&g*AåTG7 N3g9…N§üMŠcdö°ZÜ#Gß°V(Y;m²ÕÕ R«Yà¨;ÇSH¨$HV’Ú©.§@Ôz#vwV‰—¨ôš»ì@7;#Cõš‚Ç£ÒO¢À S ä “8UPJ n·»èÜúÜ8-w1›tÎErHžÞºW|;5/‰â¦ËêÄY¦(Pi $¬4ÅS}R¾z²Ô ™/­FãT ̤æ"AMý?ýêA«Ö~ݤõ5ÜŽs/…D R d‰ª +gdú£7Ǻ]Ü#ðˆÂFs‘ì&6(ì:ÛeÇ_|Ò¹KWò,£³_Ï~*@’ rª¢„rlQÕ%·j/êXo-@’Ò¥ËÄ êÆ¡‡¿ æ"ÕžtÄÒür Ó…ôìÖãvbçÖPŽ·Nß@)íÓª¶ÊrÓ÷Û©b‚4…DH我Njk«­ëœœÜŽ.2׸­b¿¹8äeÒw€@ 5ˆÞZ³}â°µþðUGB@Ìs·J$ÃEº¸í¹HŒÊ?þ¿ÌhÏE bp±¿¬øƒû´­Klm¾¥þTÈC’WÌ«ªdCÛíÔ–Wœ» \% Rãsxøì+Èos¢pl5B¼‰;ø˜öýÝ­ó´„h‰¹Ì7Š@±¯ßÖ5l$s”üósýšG}\e\€¤-€v‡øTWuòLµî†-ÍIöõv‡Þý’Cï÷ã‰kíÄ«¯Xû¡Ýrd¨–"ÇNs•}O'Ðq07$fmçÎØ¿{ØEmDéNmÍ@p^S=‘yIæ(‹6“Áù.)N>¿ÛN¬~F©/†ö {‡{:GAeÙ3ÑÎjëû#àN?C† $‡Ì«ØƒŽœ¿Üö~õO\C% wV XJM¾°wPº¼þ÷ZÛž3^Ž+iˆp“@¥¥P $À]r'Šßr+i{¾ø…Läö$oà'ã"1N—'">púˆ~ø7PnÊúf¤@É7ã[}ƒÏÄ4âè¹+ìøVÙ‰õ/ èy££L÷¸ºp—èZìA‡|pl„ÖÇ´}÷=!ô ]I£"8‚–;K]×-}nRi‚§ËŽ>¾Ñ|û+ž>üh1y+÷¦äb9mà<›‡¶rÍ_-…Ë8ÀAvÙoë~õ—¬¦yAIIér¨S äPïežŸyGæ¯ZÚb;ÿ÷ìLëA›Lv&bé¢Aw{{Ðx:h‡ pž(A\äV9øŽ<ö³'mÓï~ÞÚÏ`v#îPéKž¾ 7››„‹@9:xªw’Ž”‘ßÜù‡_µò1ÙÙ¡6j®³FÞ1|^Qi:»ª9ìí:¨ñlP½}â ]s/mvÛÎ ¶î—Áj§L´q‹V*>…D"Hi‘Î2 tb.­ïÄ_ü;±áˆíø¿a]­ìh(¯†.ŒÄQÊÀÉDGº®\ –öÎZ;øø·lóoRûØHääEΑsDit5ÑÔ@ðœƒT-vTJPZö¾é8Ña>ò1ÙZ~ÛA»½3táÎv”A(—2€V–ŽܬRÀœ•3º>ôÝ¿¶µü×vJ›‘5ýÖ’·ô4$œHé§@õIºi ‰Ý)Ðy®Íª‡UÛKï}—ÝxÔFÍg³ÿÛçmŒ8-·w„åSˆ\\&!ØWšÜ±Á^ÿ¯Ÿ³#lPzu1)=M3€žNP¨s€88EÅ‹…„‹d¾Ò½ŒüW"ç2ÉŠ¸Î}•7ñçVXýü¸]5kµË6³V«rØç"àmrð­†GĽõ™ïÙž¯~ÙŽ>»Ý¹Þi¿q»5|ìTùˆÀ«ª @ÉÔÊP r­ÏÑïümùý/8HÕŠ‹ýŽy6íC¶Ñ·¼×¸†“t%l >õ¿íÐãÙñ¬µv™ú8Ç(tµÐ€sÀsàËâ©3G_…šf€ .:°f×ÞXaI°¯$ç¨k¦ÙÕ·Þd“Þÿa«›>7€£î9pRvWP:]óŒ~ì{vìéŸÙ±µdÌ@xÔ ¶ø>hu#…ü^WúòH 9ä»@OÐJ‰ž]šÎ[÷Á;íôꃕåëÑ' .‰³Íc¬ªN$3ŸÎÓçìÜŽ.:lÃôCzDdç …h)ù£’¦S7¨”pû¶f;þÃ-ÂGPPuè/j·½•T*pÕ]n»èŽ`OÊp¬S+ eUcFÙ°éã­sïk?qÒÎ:mƒyy3Æ×FÎk þæïlØäiŠ¥´ðÐI ‰N’©#œGÀ¬CU­æ÷=[Ÿýg[ÿ‘Oɹ#ZçN G#ðŽÅ Œtàâ:xW¹ œÃœãžgØØZ[øíûìõ¯ü©øæsïšðŒƒ¤\Ô/”SN•EœsšY™àÐÒ:²çÕóè.®•¶\2Á~ékV5µQ‘°¶š7­éÛTÅ¥0„(ïÓCè±Ó£öFæòjk:%®"ÐÖÚ˜›ï¶Y¿ûËŽm‚?:'â8çÑTMt,¹Ü9^~`áHH‘¦VçÕuUÖpïGíªÆ6ûsÿæþ_wa¾è%+‰—§dÎý¡W‘~FaMRú6´Dƒu’zhY˜Ç$Sum•MùÕÛláýYÍ´FÝ8â¯RFåhÍSHÈS q’yj¤s§šâj±](:WP‚ÈhÛ½¿£3O»¸se€žððŠ –KƒsÐKIìGæÒ^Õ8Úê?ñ»6ñïsq<*€Z×ÿÌöñ¿ÛÑÇ6JävŽx”«¶yÃT¤ÇSc(>\“\ðöôZ݃%PÝÒñ6ç3h#W\§ç§#=RH(C’eˆ2Ô£ÚÑgŽ‚v8ó®5Îû¾õUÛóÇ_³ö£×¤\üõ«NÎÉdS€ „œôt«–±6å=o³é2çé¬+1G¡LìÞqyqæàkýÁwìè“OÙ©WwɦñTVB A@ôœ[*¤ì¾Ì…tÆ,cÕ¸Z{C‹Mù—¿`ãnÿEü6¡zIePÎóR£kóÜé()@2R"»Q€yI¸:8,>k?!-IDATŒ´‹Î%´?Íέ¶÷?+ó™õÖy `S:N‘Ó ÀH(²îˆIÃmÄœi6¼¥É&¼ó.sÓ]M‰Á™f?•Ø5ˆÇV%¤ª¡POë8gGúˆÝ°Ñ:ϲ¶Ö£Zê¸Ï9J5WÀиväU6zN³æ¯·Q7¼Kå Ä…çb…Pn;ˆðœ’¸io ‰9 $Ì# 
uqfoundation-multiprocess-b3457a5/py3.10/000077500000000000000000000000001455552142400202525ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/Modules/000077500000000000000000000000001455552142400216625ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/Modules/_multiprocess/000077500000000000000000000000001455552142400245525ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/Modules/_multiprocess/clinic/000077500000000000000000000000001455552142400260135ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/Modules/_multiprocess/clinic/multiprocessing.c.h000066400000000000000000000076121455552142400316420ustar00rootroot00000000000000/*[clinic input]
preserve
[clinic start generated code]*/

#if defined(MS_WINDOWS)

PyDoc_STRVAR(_multiprocessing_closesocket__doc__,
"closesocket($module, handle, /)\n"
"--\n"
"\n");

#define _MULTIPROCESSING_CLOSESOCKET_METHODDEF    \
    {"closesocket", (PyCFunction)_multiprocessing_closesocket, METH_O, _multiprocessing_closesocket__doc__},

static PyObject *
_multiprocessing_closesocket_impl(PyObject *module, HANDLE handle);

static PyObject *
_multiprocessing_closesocket(PyObject *module, PyObject *arg)
{
    PyObject *return_value = NULL;
    HANDLE handle;

    if (!PyArg_Parse(arg, ""F_HANDLE":closesocket", &handle)) {
        goto exit;
    }
    return_value = _multiprocessing_closesocket_impl(module, handle);

exit:
    return return_value;
}

#endif /* defined(MS_WINDOWS) */

#if defined(MS_WINDOWS)

PyDoc_STRVAR(_multiprocessing_recv__doc__,
"recv($module, handle, size, /)\n"
"--\n"
"\n");

#define _MULTIPROCESSING_RECV_METHODDEF    \
    {"recv", (PyCFunction)(void(*)(void))_multiprocessing_recv, METH_FASTCALL, _multiprocessing_recv__doc__},

static PyObject *
_multiprocessing_recv_impl(PyObject *module, HANDLE handle, int size);

static PyObject *
_multiprocessing_recv(PyObject *module, PyObject *const *args, Py_ssize_t nargs)
{
    PyObject *return_value = NULL;
    HANDLE handle;
    int size;

    if (!_PyArg_ParseStack(args, nargs, ""F_HANDLE"i:recv",
        &handle, &size)) {
        goto exit;
    }
    return_value = _multiprocessing_recv_impl(module, handle, size);

exit:
    return return_value;
}

#endif /* defined(MS_WINDOWS) */

#if defined(MS_WINDOWS)

PyDoc_STRVAR(_multiprocessing_send__doc__,
"send($module, handle, buf, /)\n"
"--\n"
"\n");

#define _MULTIPROCESSING_SEND_METHODDEF    \
    {"send", (PyCFunction)(void(*)(void))_multiprocessing_send, METH_FASTCALL, _multiprocessing_send__doc__},

static PyObject *
_multiprocessing_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf);

static PyObject *
_multiprocessing_send(PyObject *module, PyObject *const *args, Py_ssize_t nargs)
{
    PyObject *return_value = NULL;
    HANDLE handle;
    Py_buffer buf = {NULL, NULL};

    if (!_PyArg_ParseStack(args, nargs, ""F_HANDLE"y*:send",
        &handle, &buf)) {
        goto exit;
    }
    return_value = _multiprocessing_send_impl(module, handle, &buf);

exit:
    /* Cleanup for buf */
    if (buf.obj) {
       PyBuffer_Release(&buf);
    }

    return return_value;
}

#endif /* defined(MS_WINDOWS) */
PyDoc_STRVAR(_multiprocessing_sem_unlink__doc__,
"sem_unlink($module, name, /)\n"
"--\n"
"\n");

#define _MULTIPROCESSING_SEM_UNLINK_METHODDEF    \
    {"sem_unlink", (PyCFunction)_multiprocessing_sem_unlink, METH_O, _multiprocessing_sem_unlink__doc__},

static PyObject *
_multiprocessing_sem_unlink_impl(PyObject *module, const char *name);

static PyObject *
_multiprocessing_sem_unlink(PyObject *module, PyObject *arg)
{
    PyObject *return_value = NULL;
    const char *name;

    if (!PyUnicode_Check(arg)) {
        _PyArg_BadArgument("sem_unlink", "argument", "str", arg);
        goto exit;
    }
    Py_ssize_t name_length;
    name = PyUnicode_AsUTF8AndSize(arg, &name_length);
    if (name == NULL) {
        goto exit;
    }
    if (strlen(name) != (size_t)name_length) {
        PyErr_SetString(PyExc_ValueError, "embedded null character");
        goto exit;
    }
    return_value = _multiprocessing_sem_unlink_impl(module, name);

exit:
    return return_value;
}

#ifndef _MULTIPROCESSING_CLOSESOCKET_METHODDEF
    #define _MULTIPROCESSING_CLOSESOCKET_METHODDEF
#endif /* !defined(_MULTIPROCESSING_CLOSESOCKET_METHODDEF) */

#ifndef _MULTIPROCESSING_RECV_METHODDEF
    #define _MULTIPROCESSING_RECV_METHODDEF
#endif /* !defined(_MULTIPROCESSING_RECV_METHODDEF) */

#ifndef _MULTIPROCESSING_SEND_METHODDEF
    #define _MULTIPROCESSING_SEND_METHODDEF
#endif /* !defined(_MULTIPROCESSING_SEND_METHODDEF) */
/*[clinic end generated code: output=418191c446cd5751 input=a9049054013a1b77]*/
uqfoundation-multiprocess-b3457a5/py3.10/Modules/_multiprocess/clinic/posixshmem.c.h000066400000000000000000000072611455552142400306070ustar00rootroot00000000000000/*[clinic input]
preserve
[clinic start generated code]*/

#if defined(HAVE_SHM_OPEN)

PyDoc_STRVAR(_posixshmem_shm_open__doc__,
"shm_open($module, /, path, flags, mode=511)\n"
"--\n"
"\n"
"Open a shared memory object. Returns a file descriptor (integer).");

#define _POSIXSHMEM_SHM_OPEN_METHODDEF    \
    {"shm_open", (PyCFunction)(void(*)(void))_posixshmem_shm_open, METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_open__doc__},

static int
_posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode);

static PyObject *
_posixshmem_shm_open(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    static const char * const _keywords[] = {"path", "flags", "mode", NULL};
    static _PyArg_Parser _parser = {NULL, _keywords, "shm_open", 0};
    PyObject *argsbuf[3];
    Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 2;
    PyObject *path;
    int flags;
    int mode = 511;
    int _return_value;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 2, 3, 0, argsbuf);
    if (!args) {
        goto exit;
    }
    if (!PyUnicode_Check(args[0])) {
        _PyArg_BadArgument("shm_open", "argument 'path'", "str", args[0]);
        goto exit;
    }
    if (PyUnicode_READY(args[0]) == -1) {
        goto exit;
    }
    path = args[0];
    flags = _PyLong_AsInt(args[1]);
    if (flags == -1 && PyErr_Occurred()) {
        goto exit;
    }
    if (!noptargs) {
        goto skip_optional_pos;
    }
    mode = _PyLong_AsInt(args[2]);
    if (mode == -1 && PyErr_Occurred()) {
        goto exit;
    }
skip_optional_pos:
    _return_value = _posixshmem_shm_open_impl(module, path, flags, mode);
    if ((_return_value == -1) && PyErr_Occurred()) {
        goto exit;
    }
    return_value = PyLong_FromLong((long)_return_value);

exit:
    return return_value;
}

#endif /* defined(HAVE_SHM_OPEN) */

#if defined(HAVE_SHM_UNLINK)

PyDoc_STRVAR(_posixshmem_shm_unlink__doc__,
"shm_unlink($module, /, path)\n"
"--\n"
"\n"
"Remove a shared memory object (similar to unlink()).\n"
"\n"
"Remove a shared memory object name, and, once all processes have unmapped\n"
"the object, de-allocates and destroys the contents of the associated memory\n"
"region.");

#define _POSIXSHMEM_SHM_UNLINK_METHODDEF    \
    {"shm_unlink", (PyCFunction)(void(*)(void))_posixshmem_shm_unlink, METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_unlink__doc__},

static PyObject *
_posixshmem_shm_unlink_impl(PyObject *module, PyObject *path);

static PyObject *
_posixshmem_shm_unlink(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    static const char * const _keywords[] = {"path", NULL};
    static _PyArg_Parser _parser = {NULL, _keywords, "shm_unlink", 0};
    PyObject *argsbuf[1];
    PyObject *path;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf);
    if (!args) {
        goto exit;
    }
    if (!PyUnicode_Check(args[0])) {
        _PyArg_BadArgument("shm_unlink", "argument 'path'", "str", args[0]);
        goto exit;
    }
    if (PyUnicode_READY(args[0]) == -1) {
        goto exit;
    }
    path = args[0];
    return_value = _posixshmem_shm_unlink_impl(module, path);

exit:
    return return_value;
}

#endif /* defined(HAVE_SHM_UNLINK) */

#ifndef _POSIXSHMEM_SHM_OPEN_METHODDEF
    #define _POSIXSHMEM_SHM_OPEN_METHODDEF
#endif /* !defined(_POSIXSHMEM_SHM_OPEN_METHODDEF) */

#ifndef _POSIXSHMEM_SHM_UNLINK_METHODDEF
    #define _POSIXSHMEM_SHM_UNLINK_METHODDEF
#endif /* !defined(_POSIXSHMEM_SHM_UNLINK_METHODDEF) */
/*[clinic end generated code: output=bca8e78d0f43ef1a input=a9049054013a1b77]*/
uqfoundation-multiprocess-b3457a5/py3.10/Modules/_multiprocess/clinic/semaphore.c.h000066400000000000000000000303071455552142400303730ustar00rootroot00000000000000/*[clinic input]
preserve
[clinic start generated code]*/

#if defined(MS_WINDOWS)

PyDoc_STRVAR(_multiprocessing_SemLock_acquire__doc__,
"acquire($self, /, block=True, timeout=None)\n"
"--\n"
"\n"
"Acquire the semaphore/lock.");

#define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF    \
    {"acquire", (PyCFunction)(void(*)(void))_multiprocessing_SemLock_acquire, METH_FASTCALL|METH_KEYWORDS, _multiprocessing_SemLock_acquire__doc__},

static PyObject *
_multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj);

static PyObject *
_multiprocessing_SemLock_acquire(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    static const char * const _keywords[] = {"block", "timeout", NULL};
    static _PyArg_Parser _parser = {NULL, _keywords, "acquire", 0};
PyObject *argsbuf[2]; Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 0; int blocking = 1; PyObject *timeout_obj = Py_None; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 2, 0, argsbuf); if (!args) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (args[0]) { blocking = _PyLong_AsInt(args[0]); if (blocking == -1 && PyErr_Occurred()) { goto exit; } if (!--noptargs) { goto skip_optional_pos; } } timeout_obj = args[1]; skip_optional_pos: return_value = _multiprocessing_SemLock_acquire_impl(self, blocking, timeout_obj); exit: return return_value; } #endif /* defined(MS_WINDOWS) */ #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_release__doc__, "release($self, /)\n" "--\n" "\n" "Release the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF \ {"release", (PyCFunction)_multiprocessing_SemLock_release, METH_NOARGS, _multiprocessing_SemLock_release__doc__}, static PyObject * _multiprocessing_SemLock_release_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock_release(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock_release_impl(self); } #endif /* defined(MS_WINDOWS) */ #if !defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_acquire__doc__, "acquire($self, /, block=True, timeout=None)\n" "--\n" "\n" "Acquire the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF \ {"acquire", (PyCFunction)(void(*)(void))_multiprocessing_SemLock_acquire, METH_FASTCALL|METH_KEYWORDS, _multiprocessing_SemLock_acquire__doc__}, static PyObject * _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj); static PyObject * _multiprocessing_SemLock_acquire(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; static const char * const _keywords[] = {"block", "timeout", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "acquire", 0}; PyObject *argsbuf[2]; Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 0; int blocking = 1; PyObject *timeout_obj = Py_None; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 2, 0, argsbuf); if (!args) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (args[0]) { blocking = _PyLong_AsInt(args[0]); if (blocking == -1 && PyErr_Occurred()) { goto exit; } if (!--noptargs) { goto skip_optional_pos; } } timeout_obj = args[1]; skip_optional_pos: return_value = _multiprocessing_SemLock_acquire_impl(self, blocking, timeout_obj); exit: return return_value; } #endif /* !defined(MS_WINDOWS) */ #if !defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_release__doc__, "release($self, /)\n" "--\n" "\n" "Release the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF \ {"release", (PyCFunction)_multiprocessing_SemLock_release, METH_NOARGS, _multiprocessing_SemLock_release__doc__}, static PyObject * _multiprocessing_SemLock_release_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock_release(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock_release_impl(self); } #endif /* !defined(MS_WINDOWS) */ static PyObject * _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, int maxvalue, const char *name, int unlink); static PyObject * _multiprocessing_SemLock(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; static const char * const _keywords[] = {"kind", "value", "maxvalue", "name", "unlink", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "SemLock", 0}; PyObject *argsbuf[5]; PyObject * const *fastargs; Py_ssize_t nargs = PyTuple_GET_SIZE(args); int kind; int value; int maxvalue; const char *name; int unlink; fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 5, 5, 0, argsbuf); if (!fastargs) { goto exit; } kind = _PyLong_AsInt(fastargs[0]); if (kind == -1 && PyErr_Occurred()) { goto exit; } value = _PyLong_AsInt(fastargs[1]); if (value == -1 && PyErr_Occurred()) { goto exit; } maxvalue = _PyLong_AsInt(fastargs[2]); if (maxvalue == -1 && PyErr_Occurred()) { goto exit; } if (!PyUnicode_Check(fastargs[3])) { _PyArg_BadArgument("SemLock", "argument 'name'", "str", fastargs[3]); goto exit; } Py_ssize_t name_length; name = PyUnicode_AsUTF8AndSize(fastargs[3], &name_length); if (name == NULL) { goto exit; } if (strlen(name) != (size_t)name_length) { PyErr_SetString(PyExc_ValueError, "embedded null character"); goto exit; } unlink = _PyLong_AsInt(fastargs[4]); if (unlink == -1 && PyErr_Occurred()) { goto exit; } return_value = _multiprocessing_SemLock_impl(type, kind, value, maxvalue, name, unlink); exit: return return_value; } PyDoc_STRVAR(_multiprocessing_SemLock__rebuild__doc__, "_rebuild($type, handle, kind, maxvalue, name, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF \ {"_rebuild", (PyCFunction)(void(*)(void))_multiprocessing_SemLock__rebuild, METH_FASTCALL|METH_CLASS, _multiprocessing_SemLock__rebuild__doc__}, static PyObject * _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, const char *name); static PyObject * _multiprocessing_SemLock__rebuild(PyTypeObject *type, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; SEM_HANDLE handle; int kind; int maxvalue; const char *name; if (!_PyArg_ParseStack(args, nargs, ""F_SEM_HANDLE"iiz:_rebuild", &handle, &kind, &maxvalue, &name)) { goto exit; } return_value = 
_multiprocessing_SemLock__rebuild_impl(type, handle, kind, maxvalue, name); exit: return return_value; } PyDoc_STRVAR(_multiprocessing_SemLock__count__doc__, "_count($self, /)\n" "--\n" "\n" "Num of `acquire()`s minus num of `release()`s for this process."); #define _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF \ {"_count", (PyCFunction)_multiprocessing_SemLock__count, METH_NOARGS, _multiprocessing_SemLock__count__doc__}, static PyObject * _multiprocessing_SemLock__count_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__count(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__count_impl(self); } PyDoc_STRVAR(_multiprocessing_SemLock__is_mine__doc__, "_is_mine($self, /)\n" "--\n" "\n" "Whether the lock is owned by this thread."); #define _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF \ {"_is_mine", (PyCFunction)_multiprocessing_SemLock__is_mine, METH_NOARGS, _multiprocessing_SemLock__is_mine__doc__}, static PyObject * _multiprocessing_SemLock__is_mine_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__is_mine(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__is_mine_impl(self); } PyDoc_STRVAR(_multiprocessing_SemLock__get_value__doc__, "_get_value($self, /)\n" "--\n" "\n" "Get the value of the semaphore."); #define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF \ {"_get_value", (PyCFunction)_multiprocessing_SemLock__get_value, METH_NOARGS, _multiprocessing_SemLock__get_value__doc__}, static PyObject * _multiprocessing_SemLock__get_value_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__get_value(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__get_value_impl(self); } PyDoc_STRVAR(_multiprocessing_SemLock__is_zero__doc__, "_is_zero($self, /)\n" "--\n" "\n" "Return whether semaphore has value zero."); #define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF \ {"_is_zero", (PyCFunction)_multiprocessing_SemLock__is_zero, METH_NOARGS, _multiprocessing_SemLock__is_zero__doc__}, static PyObject * _multiprocessing_SemLock__is_zero_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__is_zero(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__is_zero_impl(self); } PyDoc_STRVAR(_multiprocessing_SemLock__after_fork__doc__, "_after_fork($self, /)\n" "--\n" "\n" "Rezero the net acquisition count after fork()."); #define _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF \ {"_after_fork", (PyCFunction)_multiprocessing_SemLock__after_fork, METH_NOARGS, _multiprocessing_SemLock__after_fork__doc__}, static PyObject * _multiprocessing_SemLock__after_fork_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__after_fork(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__after_fork_impl(self); } PyDoc_STRVAR(_multiprocessing_SemLock___enter____doc__, "__enter__($self, /)\n" "--\n" "\n" "Enter the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF \ {"__enter__", (PyCFunction)_multiprocessing_SemLock___enter__, METH_NOARGS, _multiprocessing_SemLock___enter____doc__}, static PyObject * _multiprocessing_SemLock___enter___impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock___enter__(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock___enter___impl(self); } PyDoc_STRVAR(_multiprocessing_SemLock___exit____doc__, "__exit__($self, exc_type=None, exc_value=None, exc_tb=None, /)\n" "--\n" "\n" 
"Exit the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF \ {"__exit__", (PyCFunction)(void(*)(void))_multiprocessing_SemLock___exit__, METH_FASTCALL, _multiprocessing_SemLock___exit____doc__}, static PyObject * _multiprocessing_SemLock___exit___impl(SemLockObject *self, PyObject *exc_type, PyObject *exc_value, PyObject *exc_tb); static PyObject * _multiprocessing_SemLock___exit__(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; PyObject *exc_type = Py_None; PyObject *exc_value = Py_None; PyObject *exc_tb = Py_None; if (!_PyArg_CheckPositional("__exit__", nargs, 0, 3)) { goto exit; } if (nargs < 1) { goto skip_optional; } exc_type = args[0]; if (nargs < 2) { goto skip_optional; } exc_value = args[1]; if (nargs < 3) { goto skip_optional; } exc_tb = args[2]; skip_optional: return_value = _multiprocessing_SemLock___exit___impl(self, exc_type, exc_value, exc_tb); exit: return return_value; } #ifndef _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF) */ /*[clinic end generated code: output=e7fd938150601fe5 input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.10/Modules/_multiprocess/multiprocess.c000066400000000000000000000154711455552142400274570ustar00rootroot00000000000000/* * Extension module used by multiprocess package * * multiprocess.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocess.h" /*[python input] class HANDLE_converter(CConverter): type = "HANDLE" format_unit = '"F_HANDLE"' [python start generated code]*/ /*[python end generated code: output=da39a3ee5e6b4b0d input=9fad6080b79ace91]*/ /*[clinic input] module _multiprocess [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=01e0745f380ac6e3]*/ #include "clinic/multiprocessing.c.h" /* * Function which raises exceptions based on error codes */ PyObject * _PyMp_SetError(PyObject *Type, int num) { switch (num) { #ifdef MS_WINDOWS case MP_STANDARD_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, 0); break; case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, WSAGetLastError()); break; #else /* !MS_WINDOWS */ case MP_STANDARD_ERROR: case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetFromErrno(Type); break; #endif /* !MS_WINDOWS */ case MP_MEMORY_ERROR: PyErr_NoMemory(); break; case MP_EXCEPTION_HAS_BEEN_SET: break; default: PyErr_Format(PyExc_RuntimeError, "unknown error number %d", num); } return NULL; } #ifdef MS_WINDOWS /*[clinic input] _multiprocess.closesocket handle: HANDLE / [clinic start generated code]*/ static PyObject * _multiprocess_closesocket_impl(PyObject *module, HANDLE handle) /*[clinic end generated code: output=214f359f900966f4 input=8a20706dd386c6cc]*/ { int ret; Py_BEGIN_ALLOW_THREADS ret = closesocket((SOCKET) handle); Py_END_ALLOW_THREADS if (ret) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); Py_RETURN_NONE; } /*[clinic input] _multiprocess.recv handle: HANDLE size: int / [clinic start generated code]*/ static PyObject * _multiprocess_recv_impl(PyObject *module, HANDLE handle, int size) /*[clinic end generated code: output=92322781ba9ff598 
input=6a5b0834372cee5b]*/ { int nread; PyObject *buf; buf = PyBytes_FromStringAndSize(NULL, size); if (!buf) return NULL; Py_BEGIN_ALLOW_THREADS nread = recv((SOCKET) handle, PyBytes_AS_STRING(buf), size, 0); Py_END_ALLOW_THREADS if (nread < 0) { Py_DECREF(buf); return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); } _PyBytes_Resize(&buf, nread); return buf; } /*[clinic input] _multiprocess.send handle: HANDLE buf: Py_buffer / [clinic start generated code]*/ static PyObject * _multiprocess_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf) /*[clinic end generated code: output=52d7df0519c596cb input=41dce742f98d2210]*/ { int ret, length; length = (int)Py_MIN(buf->len, INT_MAX); Py_BEGIN_ALLOW_THREADS ret = send((SOCKET) handle, buf->buf, length, 0); Py_END_ALLOW_THREADS if (ret < 0) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); return PyLong_FromLong(ret); } #endif /*[clinic input] _multiprocess.sem_unlink name: str / [clinic start generated code]*/ static PyObject * _multiprocess_sem_unlink_impl(PyObject *module, const char *name) /*[clinic end generated code: output=fcbfeb1ed255e647 input=bf939aff9564f1d5]*/ { return _PyMp_sem_unlink(name); } /* * Function table */ static PyMethodDef module_methods[] = { #ifdef MS_WINDOWS _MULTIPROCESSING_CLOSESOCKET_METHODDEF _MULTIPROCESSING_RECV_METHODDEF _MULTIPROCESSING_SEND_METHODDEF #endif #if !defined(POSIX_SEMAPHORES_NOT_ENABLED) && !defined(__ANDROID__) _MULTIPROCESSING_SEM_UNLINK_METHODDEF #endif {NULL} }; /* * Initialize */ static int multiprocess_exec(PyObject *module) { #if defined(MS_WINDOWS) || \ (defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED)) /* Add _PyMp_SemLock type to module */ if (PyModule_AddType(module, &_PyMp_SemLockType) < 0) { return -1; } { PyObject *py_sem_value_max; /* Some systems define SEM_VALUE_MAX as an unsigned value that * causes it to be negative when used as an int (NetBSD). * * Issue #28152: Use (0) instead of 0 to fix a warning on dead code * when using clang -Wunreachable-code. 
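 * (Illustrative note: with such a definition, e.g. an unsigned constant like 0xFFFFFFFFu, the cast to int below is negative, so INT_MAX is exposed as SEM_VALUE_MAX instead.)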
*/ if ((int)(SEM_VALUE_MAX) < (0)) py_sem_value_max = PyLong_FromLong(INT_MAX); else py_sem_value_max = PyLong_FromLong(SEM_VALUE_MAX); if (py_sem_value_max == NULL) { return -1; } if (PyDict_SetItemString(_PyMp_SemLockType.tp_dict, "SEM_VALUE_MAX", py_sem_value_max) < 0) { Py_DECREF(py_sem_value_max); return -1; } Py_DECREF(py_sem_value_max); } #endif /* Add configuration macros */ PyObject *flags = PyDict_New(); if (!flags) { return -1; } #define ADD_FLAG(name) \ do { \ PyObject *value = PyLong_FromLong(name); \ if (value == NULL) { \ Py_DECREF(flags); \ return -1; \ } \ if (PyDict_SetItemString(flags, #name, value) < 0) { \ Py_DECREF(flags); \ Py_DECREF(value); \ return -1; \ } \ Py_DECREF(value); \ } while (0) #if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) ADD_FLAG(HAVE_SEM_OPEN); #endif #ifdef HAVE_SEM_TIMEDWAIT ADD_FLAG(HAVE_SEM_TIMEDWAIT); #endif #ifdef HAVE_BROKEN_SEM_GETVALUE ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE); #endif #ifdef HAVE_BROKEN_SEM_UNLINK ADD_FLAG(HAVE_BROKEN_SEM_UNLINK); #endif if (PyModule_AddObject(module, "flags", flags) < 0) { Py_DECREF(flags); return -1; } return 0; } static PyModuleDef_Slot multiprocess_slots[] = { {Py_mod_exec, multiprocess_exec}, {0, NULL} }; static struct PyModuleDef multiprocess_module = { PyModuleDef_HEAD_INIT, .m_name = "_multiprocess", .m_methods = module_methods, .m_slots = multiprocess_slots, }; PyMODINIT_FUNC PyInit__multiprocess(void) { return PyModuleDef_Init(&multiprocess_module); } uqfoundation-multiprocess-b3457a5/py3.10/Modules/_multiprocess/multiprocess.h000066400000000000000000000041361455552142400274600ustar00rootroot00000000000000#ifndef MULTIPROCESS_H #define MULTIPROCESS_H #define PY_SSIZE_T_CLEAN #include "Python.h" #include "structmember.h" #include "pythread.h" /* * Platform includes and definitions */ #ifdef MS_WINDOWS # define WIN32_LEAN_AND_MEAN # include <windows.h> # include <winsock2.h> # include <process.h> /* getpid() */ # ifdef Py_DEBUG # include <crtdbg.h> # endif # define SEM_HANDLE HANDLE # define SEM_VALUE_MAX LONG_MAX #else # include <fcntl.h> /* O_CREAT and O_EXCL */ # if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) # include <semaphore.h> typedef sem_t *SEM_HANDLE; # endif #endif /* * Issue 3110 - Solaris does not define SEM_VALUE_MAX */ #ifndef SEM_VALUE_MAX #if defined(HAVE_SYSCONF) && defined(_SC_SEM_VALUE_MAX) # define SEM_VALUE_MAX sysconf(_SC_SEM_VALUE_MAX) #elif defined(_SEM_VALUE_MAX) # define SEM_VALUE_MAX _SEM_VALUE_MAX #elif defined(_POSIX_SEM_VALUE_MAX) # define SEM_VALUE_MAX _POSIX_SEM_VALUE_MAX #else # define SEM_VALUE_MAX INT_MAX #endif #endif /* * Format codes */ #if SIZEOF_VOID_P == SIZEOF_LONG # define F_POINTER "k" # define T_POINTER T_ULONG #elif SIZEOF_VOID_P == SIZEOF_LONG_LONG # define F_POINTER "K" # define T_POINTER T_ULONGLONG #else # error "can't find format code for unsigned integer of same size as void*" #endif #ifdef MS_WINDOWS # define F_HANDLE F_POINTER # define T_HANDLE T_POINTER # define F_SEM_HANDLE F_HANDLE # define T_SEM_HANDLE T_HANDLE #else # define F_HANDLE "i" # define T_HANDLE T_INT # define F_SEM_HANDLE F_POINTER # define T_SEM_HANDLE T_POINTER #endif /* * Error codes which can be returned by functions called without GIL */ #define MP_SUCCESS (0) #define MP_STANDARD_ERROR (-1) #define MP_MEMORY_ERROR (-1001) #define MP_SOCKET_ERROR (-1002) #define MP_EXCEPTION_HAS_BEEN_SET (-1003) PyObject *_PyMp_SetError(PyObject *Type, int num); /* * Externs - not all will really exist on all platforms */ extern PyTypeObject _PyMp_SemLockType; extern PyObject *_PyMp_sem_unlink(const char *name); #endif /*
MULTIPROCESS_H */ uqfoundation-multiprocess-b3457a5/py3.10/Modules/_multiprocess/posixshmem.c000066400000000000000000000055071455552142400271210ustar00rootroot00000000000000/* posixshmem - A Python extension that provides shm_open() and shm_unlink() */ #define PY_SSIZE_T_CLEAN #include <Python.h> // for shm_open() and shm_unlink() #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif /*[clinic input] module _posixshmem [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=a416734e49164bf8]*/ /* * * Module-level functions & meta stuff * */ #ifdef HAVE_SHM_OPEN /*[clinic input] _posixshmem.shm_open -> int path: unicode flags: int mode: int = 0o777 # "shm_open(path, flags, mode=0o777)\n\n\ Open a shared memory object. Returns a file descriptor (integer). [clinic start generated code]*/ static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode) /*[clinic end generated code: output=8d110171a4fa20df input=e83b58fa802fac25]*/ { int fd; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return -1; } do { Py_BEGIN_ALLOW_THREADS fd = shm_open(name, flags, mode); Py_END_ALLOW_THREADS } while (fd < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (fd < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return -1; } return fd; } #endif /* HAVE_SHM_OPEN */ #ifdef HAVE_SHM_UNLINK /*[clinic input] _posixshmem.shm_unlink path: unicode Remove a shared memory object (similar to unlink()). Remove a shared memory object name, and, once all processes have unmapped the object, de-allocates and destroys the contents of the associated memory region. [clinic start generated code]*/ static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path) /*[clinic end generated code: output=42f8b23d134b9ff5 input=8dc0f87143e3b300]*/ { int rv; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return NULL; } do { Py_BEGIN_ALLOW_THREADS rv = shm_unlink(name); Py_END_ALLOW_THREADS } while (rv < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (rv < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return NULL; } Py_RETURN_NONE; } #endif /* HAVE_SHM_UNLINK */ #include "clinic/posixshmem.c.h" static PyMethodDef module_methods[ ] = { _POSIXSHMEM_SHM_OPEN_METHODDEF _POSIXSHMEM_SHM_UNLINK_METHODDEF {NULL} /* Sentinel */ }; static struct PyModuleDef _posixshmemmodule = { PyModuleDef_HEAD_INIT, .m_name = "_posixshmem", .m_doc = "POSIX shared memory module", .m_size = 0, .m_methods = module_methods, }; /* Module init function */ PyMODINIT_FUNC PyInit__posixshmem(void) { return PyModuleDef_Init(&_posixshmemmodule); } uqfoundation-multiprocess-b3457a5/py3.10/Modules/_multiprocess/semaphore.c000066400000000000000000000521231455552142400267040ustar00rootroot00000000000000/* * A type which wraps a semaphore * * semaphore.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement.
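 * * The SemLock type defined below is the C-level primitive behind the Lock, * RLock, Semaphore and BoundedSemaphore objects exposed by the multiprocess * package.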
*/ #include "multiprocess.h" enum { RECURSIVE_MUTEX, SEMAPHORE }; typedef struct { PyObject_HEAD SEM_HANDLE handle; unsigned long last_tid; int count; int maxvalue; int kind; char *name; } SemLockObject; /*[python input] class SEM_HANDLE_converter(CConverter): type = "SEM_HANDLE" format_unit = '"F_SEM_HANDLE"' [python start generated code]*/ /*[python end generated code: output=da39a3ee5e6b4b0d input=3e0ad43e482d8716]*/ /*[clinic input] module _multiprocess class _multiprocess.SemLock "SemLockObject *" "&_PyMp_SemLockType" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=935fb41b7d032599]*/ #include "clinic/semaphore.c.h" #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) #ifdef MS_WINDOWS /* * Windows definitions */ #define SEM_FAILED NULL #define SEM_CLEAR_ERROR() SetLastError(0) #define SEM_GET_LAST_ERROR() GetLastError() #define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL) #define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1) #define SEM_GETVALUE(sem, pval) _GetSemaphoreValue(sem, pval) #define SEM_UNLINK(name) 0 static int _GetSemaphoreValue(HANDLE handle, long *value) { long previous; switch (WaitForSingleObjectEx(handle, 0, FALSE)) { case WAIT_OBJECT_0: if (!ReleaseSemaphore(handle, 1, &previous)) return MP_STANDARD_ERROR; *value = previous + 1; return 0; case WAIT_TIMEOUT: *value = 0; return 0; default: return MP_STANDARD_ERROR; } } /*[clinic input] _multiprocess.SemLock.acquire block as blocking: bool(accept={int}) = True timeout as timeout_obj: object = None Acquire the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj) /*[clinic end generated code: output=f9998f0b6b0b0872 input=86f05662cf753eb4]*/ { double timeout; DWORD res, full_msecs, nhandles; HANDLE handles[2], sigint_event; /* calculate timeout */ if (!blocking) { full_msecs = 0; } else if (timeout_obj == Py_None) { full_msecs = INFINITE; } else { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; timeout *= 1000.0; /* convert to millisecs */ if (timeout < 0.0) { timeout = 0.0; } else if (timeout >= 0.5 * INFINITE) { /* 25 days */ PyErr_SetString(PyExc_OverflowError, "timeout is too large"); return NULL; } full_msecs = (DWORD)(timeout + 0.5); } /* check whether we already own the lock */ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } /* check whether we can acquire without releasing the GIL and blocking */ if (WaitForSingleObjectEx(self->handle, 0, FALSE) == WAIT_OBJECT_0) { self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; } /* prepare list of handles */ nhandles = 0; handles[nhandles++] = self->handle; if (_PyOS_IsMainThread()) { sigint_event = _PyOS_SigintEvent(); assert(sigint_event != NULL); handles[nhandles++] = sigint_event; } else { sigint_event = NULL; } /* do the wait */ Py_BEGIN_ALLOW_THREADS if (sigint_event != NULL) ResetEvent(sigint_event); res = WaitForMultipleObjectsEx(nhandles, handles, FALSE, full_msecs, FALSE); Py_END_ALLOW_THREADS /* handle result */ switch (res) { case WAIT_TIMEOUT: Py_RETURN_FALSE; case WAIT_OBJECT_0 + 0: self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; case WAIT_OBJECT_0 + 1: errno = EINTR; return PyErr_SetFromErrno(PyExc_OSError); case WAIT_FAILED: return PyErr_SetFromWindowsErr(0); default: PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or " "WaitForMultipleObjects() gave 
unrecognized " "value %u", res); return NULL; } } /*[clinic input] _multiprocess.SemLock.release Release the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_release_impl(SemLockObject *self) /*[clinic end generated code: output=b22f53ba96b0d1db input=ba7e63a961885d3d]*/ { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } if (!ReleaseSemaphore(self->handle, 1, NULL)) { if (GetLastError() == ERROR_TOO_MANY_POSTS) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } else { return PyErr_SetFromWindowsErr(0); } } --self->count; Py_RETURN_NONE; } #else /* !MS_WINDOWS */ /* * Unix definitions */ #define SEM_CLEAR_ERROR() #define SEM_GET_LAST_ERROR() 0 #define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val) #define SEM_CLOSE(sem) sem_close(sem) #define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval) #define SEM_UNLINK(name) sem_unlink(name) /* OS X 10.4 defines SEM_FAILED as -1 instead of (sem_t *)-1; this gives compiler warnings, and (potentially) undefined behaviour. */ #ifdef __APPLE__ # undef SEM_FAILED # define SEM_FAILED ((sem_t *)-1) #endif #ifndef HAVE_SEM_UNLINK # define sem_unlink(name) 0 #endif // ifndef HAVE_SEM_TIMEDWAIT # define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save) static int sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) { int res; unsigned long delay, difference; struct timeval now, tvdeadline, tvdelay; errno = 0; tvdeadline.tv_sec = deadline->tv_sec; tvdeadline.tv_usec = deadline->tv_nsec / 1000; for (delay = 0 ; ; delay += 1000) { /* poll */ if (sem_trywait(sem) == 0) return 0; else if (errno != EAGAIN) return MP_STANDARD_ERROR; /* get current time */ if (gettimeofday(&now, NULL) < 0) return MP_STANDARD_ERROR; /* check for timeout */ if (tvdeadline.tv_sec < now.tv_sec || (tvdeadline.tv_sec == now.tv_sec && tvdeadline.tv_usec <= now.tv_usec)) { errno = ETIMEDOUT; return MP_STANDARD_ERROR; } /* calculate how much time is left */ difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 + (tvdeadline.tv_usec - now.tv_usec); /* check delay not too long -- maximum is 20 msecs */ if (delay > 20000) delay = 20000; if (delay > difference) delay = difference; /* sleep */ tvdelay.tv_sec = delay / 1000000; tvdelay.tv_usec = delay % 1000000; if (select(0, NULL, NULL, NULL, &tvdelay) < 0) return MP_STANDARD_ERROR; /* check for signals */ Py_BLOCK_THREADS res = PyErr_CheckSignals(); Py_UNBLOCK_THREADS if (res) { errno = EINTR; return MP_EXCEPTION_HAS_BEEN_SET; } } } // #endif /* !HAVE_SEM_TIMEDWAIT */ /*[clinic input] _multiprocess.SemLock.acquire block as blocking: bool(accept={int}) = True timeout as timeout_obj: object = None Acquire the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj) /*[clinic end generated code: output=f9998f0b6b0b0872 input=86f05662cf753eb4]*/ { int res, err = 0; struct timespec deadline = {0}; if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } int use_deadline = (timeout_obj != Py_None); if (use_deadline) { double timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) { return NULL; } if (timeout < 0.0) { timeout = 0.0; } struct timeval now; if (gettimeofday(&now, NULL) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } long sec = (long) timeout; long nsec = (long) (1e9 * (timeout - sec) + 0.5); deadline.tv_sec = now.tv_sec + sec; deadline.tv_nsec = now.tv_usec * 1000 + nsec; deadline.tv_sec += (deadline.tv_nsec / 1000000000); deadline.tv_nsec %= 1000000000; } /* Check whether we can acquire without releasing the GIL and blocking */ do { res = sem_trywait(self->handle); err = errno; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); errno = err; if (res < 0 && errno == EAGAIN && blocking) { /* Couldn't acquire immediately, need to block */ do { Py_BEGIN_ALLOW_THREADS if (!use_deadline) { res = sem_wait(self->handle); } else { res = sem_timedwait(self->handle, &deadline); } Py_END_ALLOW_THREADS err = errno; if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); } if (res < 0) { errno = err; if (errno == EAGAIN || errno == ETIMEDOUT) Py_RETURN_FALSE; else if (errno == EINTR) return NULL; else return PyErr_SetFromErrno(PyExc_OSError); } ++self->count; self->last_tid = PyThread_get_thread_ident(); Py_RETURN_TRUE; } /*[clinic input] _multiprocess.SemLock.release Release the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_release_impl(SemLockObject *self) /*[clinic end generated code: output=b22f53ba96b0d1db input=ba7e63a961885d3d]*/ { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } else { #ifdef HAVE_BROKEN_SEM_GETVALUE /* We will only check properly the maxvalue == 1 case */ if (self->maxvalue == 1) { /* make sure that already locked */ if (sem_trywait(self->handle) < 0) { if (errno != EAGAIN) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } /* it is already locked as expected */ } else { /* it was not locked so undo wait and raise */ if (sem_post(self->handle) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } PyErr_SetString(PyExc_ValueError, "semaphore " "or lock released too many " "times"); return NULL; } } #else int sval; /* This check is not an absolute guarantee that the semaphore does not rise above maxvalue. 
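Another process can post the semaphore between the sem_getvalue() check below and this process's own sem_post(), so the value may still transiently exceed maxvalue; the check only catches straightforward over-release.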
*/ if (sem_getvalue(self->handle, &sval) < 0) { return PyErr_SetFromErrno(PyExc_OSError); } else if (sval >= self->maxvalue) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } #endif } if (sem_post(self->handle) < 0) return PyErr_SetFromErrno(PyExc_OSError); --self->count; Py_RETURN_NONE; } #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject * newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, char *name) { SemLockObject *self = (SemLockObject *)type->tp_alloc(type, 0); if (!self) return NULL; self->handle = handle; self->kind = kind; self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; return (PyObject*)self; } /*[clinic input] @classmethod _multiprocess.SemLock.__new__ kind: int value: int maxvalue: int name: str unlink: bool(accept={int}) [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_impl(PyTypeObject *type, int kind, int value, int maxvalue, const char *name, int unlink) /*[clinic end generated code: output=30727e38f5f7577a input=b378c3ee27d3a0fa]*/ { SEM_HANDLE handle = SEM_FAILED; PyObject *result; char *name_copy = NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { PyErr_SetString(PyExc_ValueError, "unrecognized kind"); return NULL; } if (!unlink) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) { return PyErr_NoMemory(); } strcpy(name_copy, name); } SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; if (unlink && SEM_UNLINK(name) < 0) goto failure; result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; return result; failure: if (handle != SEM_FAILED) SEM_CLOSE(handle); PyMem_Free(name_copy); if (!PyErr_Occurred()) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); } return NULL; } /*[clinic input] @classmethod _multiprocess.SemLock._rebuild handle: SEM_HANDLE kind: int maxvalue: int name: str(accept={str, NoneType}) / [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, const char *name) /*[clinic end generated code: output=2aaee14f063f3bd9 input=f7040492ac6d9962]*/ { char *name_copy = NULL; if (name != NULL) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) return PyErr_NoMemory(); strcpy(name_copy, name); } #ifndef MS_WINDOWS if (name != NULL) { handle = sem_open(name, 0); if (handle == SEM_FAILED) { PyMem_Free(name_copy); return PyErr_SetFromErrno(PyExc_OSError); } } #endif return newsemlockobject(type, handle, kind, maxvalue, name_copy); } static void semlock_dealloc(SemLockObject* self) { if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); PyMem_Free(self->name); Py_TYPE(self)->tp_free((PyObject*)self); } /*[clinic input] _multiprocess.SemLock._count Num of `acquire()`s minus num of `release()`s for this process. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__count_impl(SemLockObject *self) /*[clinic end generated code: output=5ba8213900e517bb input=36fc59b1cd1025ab]*/ { return PyLong_FromLong((long)self->count); } /*[clinic input] _multiprocess.SemLock._is_mine Whether the lock is owned by this thread. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock__is_mine_impl(SemLockObject *self) /*[clinic end generated code: output=92dc98863f4303be input=a96664cb2f0093ba]*/ { /* only makes sense for a lock */ return PyBool_FromLong(ISMINE(self)); } /*[clinic input] _multiprocess.SemLock._get_value Get the value of the semaphore. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__get_value_impl(SemLockObject *self) /*[clinic end generated code: output=64bc1b89bda05e36 input=cb10f9a769836203]*/ { #ifdef HAVE_BROKEN_SEM_GETVALUE PyErr_SetNone(PyExc_NotImplementedError); return NULL; #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); /* some posix implementations use negative numbers to indicate the number of waiting threads */ if (sval < 0) sval = 0; return PyLong_FromLong((long)sval); #endif } /*[clinic input] _multiprocess.SemLock._is_zero Return whether semaphore has value zero. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__is_zero_impl(SemLockObject *self) /*[clinic end generated code: output=815d4c878c806ed7 input=294a446418d31347]*/ { #ifdef HAVE_BROKEN_SEM_GETVALUE if (sem_trywait(self->handle) < 0) { if (errno == EAGAIN) Py_RETURN_TRUE; return _PyMp_SetError(NULL, MP_STANDARD_ERROR); } else { if (sem_post(self->handle) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); Py_RETURN_FALSE; } #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); return PyBool_FromLong((long)sval == 0); #endif } /*[clinic input] _multiprocess.SemLock._after_fork Rezero the net acquisition count after fork(). [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__after_fork_impl(SemLockObject *self) /*[clinic end generated code: output=718bb27914c6a6c1 input=190991008a76621e]*/ { self->count = 0; Py_RETURN_NONE; } /*[clinic input] _multiprocess.SemLock.__enter__ Enter the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock___enter___impl(SemLockObject *self) /*[clinic end generated code: output=beeb2f07c858511f input=c5e27d594284690b]*/ { return _multiprocess_SemLock_acquire_impl(self, 1, Py_None); } /*[clinic input] _multiprocess.SemLock.__exit__ exc_type: object = None exc_value: object = None exc_tb: object = None / Exit the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock___exit___impl(SemLockObject *self, PyObject *exc_type, PyObject *exc_value, PyObject *exc_tb) /*[clinic end generated code: output=3b37c1a9f8b91a03 input=7d644b64a89903f8]*/ { return _multiprocess_SemLock_release_impl(self); } /* * Semaphore methods */ static PyMethodDef semlock_methods[] = { _MULTIPROCESS_SEMLOCK_ACQUIRE_METHODDEF _MULTIPROCESS_SEMLOCK_RELEASE_METHODDEF _MULTIPROCESS_SEMLOCK___ENTER___METHODDEF _MULTIPROCESS_SEMLOCK___EXIT___METHODDEF _MULTIPROCESS_SEMLOCK__COUNT_METHODDEF _MULTIPROCESS_SEMLOCK__IS_MINE_METHODDEF _MULTIPROCESS_SEMLOCK__GET_VALUE_METHODDEF _MULTIPROCESS_SEMLOCK__IS_ZERO_METHODDEF _MULTIPROCESS_SEMLOCK__REBUILD_METHODDEF _MULTIPROCESS_SEMLOCK__AFTER_FORK_METHODDEF {NULL} }; /* * Member table */ static PyMemberDef semlock_members[] = { {"handle", T_SEM_HANDLE, offsetof(SemLockObject, handle), READONLY, ""}, {"kind", T_INT, offsetof(SemLockObject, kind), READONLY, ""}, {"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY, ""}, {"name", T_STRING, offsetof(SemLockObject, name), READONLY, ""}, {NULL} }; /* * Semaphore type */ PyTypeObject _PyMp_SemLockType = { PyVarObject_HEAD_INIT(NULL, 0) /* tp_name */ "_multiprocess.SemLock", /* tp_basicsize */ sizeof(SemLockObject), /* tp_itemsize */ 0, /* tp_dealloc */ (destructor)semlock_dealloc, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_doc */ "Semaphore/Mutex type", /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ semlock_methods, /* tp_members */ semlock_members, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ _multiprocess_SemLock, }; /* * Function to unlink semaphore names */ PyObject * _PyMp_sem_unlink(const char *name) { if (SEM_UNLINK(name) < 0) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); return NULL; } Py_RETURN_NONE; } uqfoundation-multiprocess-b3457a5/py3.10/README_MODS000066400000000000000000001012701455552142400217550ustar00rootroot00000000000000cp -rf py3.9/examples . cp -rf py3.9/doc . cp -f py3.9/index.html . 
cp -rf Python-3.10.0a5/Modules/_multiprocessing Modules/_multiprocess cp -rf py3.9/_multiprocess _multiprocess cp -rf py3.9/multiprocess multiprocess cp Python-3.10.0a6/Lib/test/mp_*py multiprocess/tests cp Python-3.10.0a6/Lib/test/_test_multiprocessing.py multiprocess/tests/__init__.py # ---------------------------------------------------------------------- diff Python-3.10.0a5/Modules/_multiprocessing/semaphore.c Modules/_multiprocess/semaphore.c 10c10 < #include "multiprocessing.h" --- > #include "multiprocess.h" 33,34c33,34 < module _multiprocessing < class _multiprocessing.SemLock "SemLockObject *" "&_PyMp_SemLockType" --- > module _multiprocess > class _multiprocess.SemLock "SemLockObject *" "&_PyMp_SemLockType" 78c78 < _multiprocessing.SemLock.acquire --- > _multiprocess.SemLock.acquire 87c87 < _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, --- > _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, 169c169 < _multiprocessing.SemLock.release --- > _multiprocess.SemLock.release 175c175 < _multiprocessing_SemLock_release_impl(SemLockObject *self) --- > _multiprocess_SemLock_release_impl(SemLockObject *self) 230c230 < #ifndef HAVE_SEM_TIMEDWAIT --- > // ifndef HAVE_SEM_TIMEDWAIT 291c291 < #endif /* !HAVE_SEM_TIMEDWAIT */ --- > // #endif /* !HAVE_SEM_TIMEDWAIT */ 294c294 < _multiprocessing.SemLock.acquire --- > _multiprocess.SemLock.acquire 303c303 < _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, --- > _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, 379c379 < _multiprocessing.SemLock.release --- > _multiprocess.SemLock.release 385c385 < _multiprocessing_SemLock_release_impl(SemLockObject *self) --- > _multiprocess_SemLock_release_impl(SemLockObject *self) 471c471 < _multiprocessing.SemLock.__new__ --- > _multiprocess.SemLock.__new__ 482c482 < _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, --- > _multiprocess_SemLock_impl(PyTypeObject *type, int kind, int value, 530c530 < _multiprocessing.SemLock._rebuild --- > _multiprocess.SemLock._rebuild 541c541 < _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, --- > _multiprocess_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, 578c578 < _multiprocessing.SemLock._count --- > _multiprocess.SemLock._count 584c584 < _multiprocessing_SemLock__count_impl(SemLockObject *self) --- > _multiprocess_SemLock__count_impl(SemLockObject *self) 591c591 < _multiprocessing.SemLock._is_mine --- > _multiprocess.SemLock._is_mine 597c597 < _multiprocessing_SemLock__is_mine_impl(SemLockObject *self) --- > _multiprocess_SemLock__is_mine_impl(SemLockObject *self) 605c605 < _multiprocessing.SemLock._get_value --- > _multiprocess.SemLock._get_value 611c611 < _multiprocessing_SemLock__get_value_impl(SemLockObject *self) --- > _multiprocess_SemLock__get_value_impl(SemLockObject *self) 630c630 < _multiprocessing.SemLock._is_zero --- > _multiprocess.SemLock._is_zero 636c636 < _multiprocessing_SemLock__is_zero_impl(SemLockObject *self) --- > _multiprocess_SemLock__is_zero_impl(SemLockObject *self) 658c658 < _multiprocessing.SemLock._after_fork --- > _multiprocess.SemLock._after_fork 664c664 < _multiprocessing_SemLock__after_fork_impl(SemLockObject *self) --- > _multiprocess_SemLock__after_fork_impl(SemLockObject *self) 672c672 < _multiprocessing.SemLock.__enter__ --- > _multiprocess.SemLock.__enter__ 678c678 < _multiprocessing_SemLock___enter___impl(SemLockObject *self) --- > _multiprocess_SemLock___enter___impl(SemLockObject 
*self) 681c681 < return _multiprocessing_SemLock_acquire_impl(self, 1, Py_None); --- > return _multiprocess_SemLock_acquire_impl(self, 1, Py_None); 685c685 < _multiprocessing.SemLock.__exit__ --- > _multiprocess.SemLock.__exit__ 696c696 < _multiprocessing_SemLock___exit___impl(SemLockObject *self, --- > _multiprocess_SemLock___exit___impl(SemLockObject *self, 701c701 < return _multiprocessing_SemLock_release_impl(self); --- > return _multiprocess_SemLock_release_impl(self); 744c744 < /* tp_name */ "_multiprocessing.SemLock", --- > /* tp_name */ "_multiprocess.SemLock", 780c780 < /* tp_new */ _multiprocessing_SemLock, --- > /* tp_new */ _multiprocess_SemLock, # ---------------------------------------------------------------------- $ mv multiprocessing.c multiprocess.c $ mv multiprocessing.h multiprocess.h # ---------------------------------------------------------------------- diff Python-3.10.0a5/Lib/multiprocessing/resource_tracker.py Python-3.10.0a6/Lib/multiprocessing/resource_tracker.py 39a40,48 > # Use sem_unlink() to clean up named semaphores. > # > # sem_unlink() may be missing if the Python build process detected the > # absence of POSIX named semaphores. In that case, no named semaphores were > # ever opened, so no cleanup would be necessary. > if hasattr(_multiprocessing, 'sem_unlink'): > _CLEANUP_FUNCS.update({ > 'semaphore': _multiprocessing.sem_unlink, > }) 41d49 < 'semaphore': _multiprocessing.sem_unlink, # ---------------------------------------------------------------------- diff Python-3.10.0a6/Lib/test/_test_multiprocessing.py multiprocess/tests/__init__.py 23c23 < import pickle --- > import pickle #XXX: use dill? 40c40 < support.skip_if_broken_multiprocessing_synchronize() --- > import_helper.import_module('multiprocess.synchronize') 43,48c43,49 < import multiprocessing.connection < import multiprocessing.dummy < import multiprocessing.heap < import multiprocessing.managers < import multiprocessing.pool < import multiprocessing.queues --- > import multiprocess as multiprocessing > import multiprocess.connection > import multiprocess.dummy > import multiprocess.heap > import multiprocess.managers > import multiprocess.pool > import multiprocess.queues 50c51 < from multiprocessing import util --- > from multiprocess import util 53c54 < from multiprocessing import reduction --- > from multiprocess import reduction 59c60 < from multiprocessing.sharedctypes import Value, copy --- > from multiprocess.sharedctypes import Value, copy 65c66 < from multiprocessing import shared_memory --- > from multiprocess import shared_memory 93c94 < from multiprocessing import resource_tracker --- > from multiprocess import resource_tracker 121c122 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 134c135 < PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver'] --- > PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] 289c290 < from multiprocessing.process import parent_process --- > from multiprocess.process import parent_process 292c293 < def test_parent_process(self): --- > def _test_parent_process(self): 325c326 < from multiprocessing.process import parent_process --- > from multiprocess.process import parent_process 492a494 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 744c746 < from multiprocessing.forkserver import _forkserver --- > from multiprocess.forkserver import _forkserver 833,834c835,836 < self.assertIn("test_multiprocessing.py", err) < self.assertIn("1/0 # MARKER", err) --- > self.assertIn("__init__.py", 
err) > #self.assertIn("1/0 # MARKER", err) #FIXME 1123c1125 < import multiprocessing --- > import multiprocess as multiprocessing 2016c2018 < self.skipTest("requires multiprocessing.sharedctypes") --- > self.skipTest("requires multiprocess.sharedctypes") 2582a2585 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 2624a2628 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 2722,2723c2726,2727 < def test_unpickleable_result(self): < from multiprocessing.pool import MaybeEncodingError --- > def _test_unpickleable_result(self): > from multiprocess.pool import MaybeEncodingError 2811c2815 < from multiprocessing.managers import BaseManager, BaseProxy, RemoteError --- > from multiprocess.managers import BaseManager, BaseProxy, RemoteError 3454c3458 < from multiprocessing import resource_sharer --- > from multiprocess import resource_sharer 3699c3703 < self.skipTest("requires multiprocessing.sharedctypes") --- > self.skipTest("requires multiprocess.sharedctypes") 3749c3753 < @unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory") --- > @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") 4310,4312c4314,4316 < modules = ['multiprocessing.' + m for m in modules] < modules.remove('multiprocessing.__init__') < modules.append('multiprocessing') --- > modules = ['multiprocess.' + m for m in modules] > modules.remove('multiprocess.__init__') > modules.append('multiprocess') 4318,4320c4322,4324 < modules.remove('multiprocessing.popen_fork') < modules.remove('multiprocessing.popen_forkserver') < modules.remove('multiprocessing.popen_spawn_posix') --- > modules.remove('multiprocess.popen_fork') > modules.remove('multiprocess.popen_forkserver') > modules.remove('multiprocess.popen_spawn_posix') 4322c4326 < modules.remove('multiprocessing.popen_spawn_win32') --- > modules.remove('multiprocess.popen_spawn_win32') 4324c4328 < modules.remove('multiprocessing.popen_forkserver') --- > modules.remove('multiprocess.popen_forkserver') 4328c4332 < modules.remove('multiprocessing.sharedctypes') --- > modules.remove('multiprocess.sharedctypes') 4610c4614 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4650c4654 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4691c4695 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4719c4723 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4762c4766 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4810c4814 < def test_flags(self): --- > def _test_flags(self): 4813c4817 < prog = ('from test._test_multiprocessing import TestFlags; ' + --- > prog = ('from multiprocess.tests import TestFlags; ' + 5115c5119 < def test_resource_tracker(self): --- > def _test_resource_tracker(self): 5121,5123c5125,5127 < import multiprocessing as mp < from multiprocessing import resource_tracker < from multiprocessing.shared_memory import SharedMemory --- > import multiprocess as mp > from multiprocess import resource_tracker > from multiprocess.shared_memory import SharedMemory 5192c5196 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5240c5244 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5249c5253 < from multiprocessing.resource_tracker import _resource_tracker 
--- > from multiprocess.resource_tracker import _resource_tracker 5416c5420 < support.print_warning(f"multiprocessing.Manager still has " --- > support.print_warning(f"multiprocess.Manager still has " 5694c5698 < support.print_warning(f"multiprocessing.Manager still has " --- > support.print_warning(f"multiprocess.Manager still has " # ---------------------------------------------------------------------- diff Python-3.10.0a7/Lib/test/_test_multiprocessing.py Python-3.10.0a6/Lib/test/_test_multiprocessing.py 829c829 < with open(testfn, encoding="utf-8") as f: --- > with open(testfn, 'r') as f: 839c839 < sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) --- > sys.stderr = open(fd, 'w', closefd=False) 846c846 < sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) --- > sys.stderr = open(fd, 'w', closefd=False) 867c867 < with open(testfn, encoding="utf-8") as f: --- > with open(testfn, 'r') as f: 1121c1121 < with open(module_name + '.py', 'w', encoding="utf-8") as f: --- > with open(module_name + '.py', 'w') as f: diff Python-3.10.0a7/Lib/multiprocessing/util.py Python-3.10.0a6/Lib/multiprocessing/util.py 422c422 < sys.stdin = open(fd, encoding="utf-8", closefd=False) --- > sys.stdin = open(fd, closefd=False) # ---------------------------------------------------------------------- diff Python-3.10.0b1/Lib/multiprocessing/managers.py Python-3.10.0a7/Lib/multiprocessing/managers.py 195,196c195,199 < def _handle_request(self, c): < request = None --- > def handle_request(self, c): > ''' > Handle a new connection > ''' > funcname = result = request = None 213d215 < 225,235c227 < def handle_request(self, conn): < ''' < Handle a new connection < ''' < try: < self._handle_request(conn) < except SystemExit: < # Server.serve_client() calls sys.exit(0) on EOF < pass < finally: < conn.close() --- > c.close() # ---------------------------------------------------------------------- diff Python-3.10.0b1/Lib/multiprocessing/managers.py Python-3.10.0rc2/Lib/multiprocessing/managers.py 11,12c11 < __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token', < 'SharedMemoryManager' ] --- > __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] 38d36 < HAS_SHMEM = True 40a39,41 > else: > HAS_SHMEM = True > __all__.append('SharedMemoryManager') 970c971 < exposed=None, incref=True): --- > exposed=None, incref=True, manager_owned=False): 990c991 < incref=incref) --- > incref=incref, manager_owned=manager_owned) # ---------------------------------------------------------------------- diff Python-3.10.0b1/Lib/test/_test_multiprocessing.py Python-3.10.0rc2/Lib/test/_test_multiprocessing.py 2288a2289,2298 > def test_nested_queue(self): > a = self.list() # Test queue inside list > a.append(self.Queue()) > a[0].put(123) > self.assertEqual(a[0].get(), 123) > b = self.dict() # Test queue inside dict > b[0] = self.Queue() > b[0].put(456) > self.assertEqual(b[0].get(), 456) > 3782d3791 < self.assertEqual(sms.size, sms2.size) # ---------------------------------------------------------------------- diff Python-3.10.0rc2/Lib/test/_test_multiprocessing.py Python-3.10.0/Lib//test/_test_multiprocessing.py 3773a3774,3779 > def _new_shm_name(self, prefix): > # Add a PID to the name of a POSIX shared memory object to allow > # running multiprocessing tests (test_multiprocessing_fork, > # test_multiprocessing_spawn, etc) in parallel. 
> return prefix + str(os.getpid()) > 3775c3781,3782 < sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512) --- > name_tsmb = self._new_shm_name('test01_tsmb') > sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) 3779c3786 < self.assertEqual(sms.name, 'test01_tsmb') --- > self.assertEqual(sms.name, name_tsmb) 3799c3806 < also_sms = shared_memory.SharedMemory('test01_tsmb') --- > also_sms = shared_memory.SharedMemory(name_tsmb) 3804c3811 < same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size) --- > same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) 3822c3829 < names = ['test01_fn', 'test02_fn'] --- > names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] 3843a3851,3856 > name_dblunlink = self._new_shm_name('test01_dblunlink') > sms_uno = shared_memory.SharedMemory( > name_dblunlink, > create=True, > size=5000 > ) 3845,3850d3857 < sms_uno = shared_memory.SharedMemory( < 'test01_dblunlink', < create=True, < size=5000 < ) < 3854c3861 < sms_duo = shared_memory.SharedMemory('test01_dblunlink') --- > sms_duo = shared_memory.SharedMemory(name_dblunlink) 3866c3873 < 'test01_tsmb', --- > name_tsmb, 3880c3887 < ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb') --- > ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) 4085c4092,4093 < sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate') --- > name_duplicate = self._new_shm_name('test03_duplicate') > sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) 4088c4096 < self.assertEqual('test03_duplicate', sl_copy.shm.name) --- > self.assertEqual(name_duplicate, sl_copy.shm.name) # ---------------------------------------------------------------------- diff Python-3.10.0/Lib/test/_test_multiprocessing.py py3.10/multiprocess/tests/__init__.py 3826c3830 > 'multiprocessing.shared_memory._make_filename') as mock_make_filename: --- < 'multiprocess.shared_memory._make_filename') as mock_make_filename: # ---------------------------------------------------------------------- diff Python-3.10.0/Lib/test/_test_multiprocessing.py Python-3.10.1/Lib/test/_test_multiprocessing.py 613a614 > gc.collect() # For PyPy or other GCs. 2669a2671 > gc.collect() # For PyPy or other GCs. 
3794,3800d3795 < # Test pickling < sms.buf[0:6] = b'pickle' < pickled_sms = pickle.dumps(sms) < sms2 = pickle.loads(pickled_sms) < self.assertEqual(sms.name, sms2.name) < self.assertEqual(bytes(sms.buf[0:6]), bytes(sms2.buf[0:6]), b'pickle') < 3898a3894,3916 > def test_shared_memory_recreate(self): > # Test if shared memory segment is created properly, > # when _make_filename returns an existing shared memory segment name > with unittest.mock.patch( > 'multiprocessing.shared_memory._make_filename') as mock_make_filename: > > NAME_PREFIX = shared_memory._SHM_NAME_PREFIX > names = ['test01_fn', 'test02_fn'] > # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary > # because some POSIX compliant systems require name to start with / > names = [NAME_PREFIX + name for name in names] > > mock_make_filename.side_effect = names > shm1 = shared_memory.SharedMemory(create=True, size=1) > self.addCleanup(shm1.unlink) > self.assertEqual(shm1._name, names[0]) > > mock_make_filename.side_effect = names > shm2 = shared_memory.SharedMemory(create=True, size=1) > self.addCleanup(shm2.unlink) > self.assertEqual(shm2._name, names[1]) > > def test_invalid_shared_memory_cration(self): 3910a3929,3969 > def test_shared_memory_pickle_unpickle(self): > for proto in range(pickle.HIGHEST_PROTOCOL + 1): > with self.subTest(proto=proto): > sms = shared_memory.SharedMemory(create=True, size=512) > self.addCleanup(sms.unlink) > sms.buf[0:6] = b'pickle' > > # Test pickling > pickled_sms = pickle.dumps(sms, protocol=proto) > > # Test unpickling > sms2 = pickle.loads(pickled_sms) > self.assertIsInstance(sms2, shared_memory.SharedMemory) > self.assertEqual(sms.name, sms2.name) > self.assertEqual(bytes(sms.buf[0:6]), b'pickle') > self.assertEqual(bytes(sms2.buf[0:6]), b'pickle') > > # Test that unpickled version is still the same SharedMemory > sms.buf[0:6] = b'newval' > self.assertEqual(bytes(sms.buf[0:6]), b'newval') > self.assertEqual(bytes(sms2.buf[0:6]), b'newval') > > sms2.buf[0:6] = b'oldval' > self.assertEqual(bytes(sms.buf[0:6]), b'oldval') > self.assertEqual(bytes(sms2.buf[0:6]), b'oldval') > > def test_shared_memory_pickle_unpickle_dead_object(self): > for proto in range(pickle.HIGHEST_PROTOCOL + 1): > with self.subTest(proto=proto): > sms = shared_memory.SharedMemory(create=True, size=512) > sms.buf[0:6] = b'pickle' > pickled_sms = pickle.dumps(sms, protocol=proto) > > # Now, we are going to kill the original object. > # So, unpickled one won't be able to attach to it. 
> sms.close() > sms.unlink() > > with self.assertRaises(FileNotFoundError): > pickle.loads(pickled_sms) > 4128,4129c4187,4222 < sl = shared_memory.ShareableList(range(10)) < self.addCleanup(sl.shm.unlink) --- > for proto in range(pickle.HIGHEST_PROTOCOL + 1): > with self.subTest(proto=proto): > sl = shared_memory.ShareableList(range(10)) > self.addCleanup(sl.shm.unlink) > > serialized_sl = pickle.dumps(sl, protocol=proto) > deserialized_sl = pickle.loads(serialized_sl) > self.assertIsInstance( > deserialized_sl, shared_memory.ShareableList) > self.assertEqual(deserialized_sl[-1], 9) > self.assertIsNot(sl, deserialized_sl) > > deserialized_sl[4] = "changed" > self.assertEqual(sl[4], "changed") > sl[3] = "newvalue" > self.assertEqual(deserialized_sl[3], "newvalue") > > larger_sl = shared_memory.ShareableList(range(400)) > self.addCleanup(larger_sl.shm.unlink) > serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto) > self.assertEqual(len(serialized_sl), len(serialized_larger_sl)) > larger_sl.shm.close() > > deserialized_sl.shm.close() > sl.shm.close() > > def test_shared_memory_ShareableList_pickling_dead_object(self): > for proto in range(pickle.HIGHEST_PROTOCOL + 1): > with self.subTest(proto=proto): > sl = shared_memory.ShareableList(range(10)) > serialized_sl = pickle.dumps(sl, protocol=proto) > > # Now, we are going to kill the original object. > # So, unpickled one won't be able to attach to it. > sl.shm.close() > sl.shm.unlink() 4131,4150c4224,4225 < serialized_sl = pickle.dumps(sl) < deserialized_sl = pickle.loads(serialized_sl) < self.assertTrue( < isinstance(deserialized_sl, shared_memory.ShareableList) < ) < self.assertTrue(deserialized_sl[-1], 9) < self.assertFalse(sl is deserialized_sl) < deserialized_sl[4] = "changed" < self.assertEqual(sl[4], "changed") < < # Verify data is not being put into the pickled representation. < name = 'a' * len(sl.shm.name) < larger_sl = shared_memory.ShareableList(range(400)) < self.addCleanup(larger_sl.shm.unlink) < serialized_larger_sl = pickle.dumps(larger_sl) < self.assertTrue(len(serialized_sl) == len(serialized_larger_sl)) < larger_sl.shm.close() < < deserialized_sl.shm.close() < sl.shm.close() --- > with self.assertRaises(FileNotFoundError): > pickle.loads(serialized_sl) 4186a4262,4268 > # Without this line it was raising warnings like: > # UserWarning: resource_tracker: > # There appear to be 1 leaked shared_memory > # objects to clean up at shutdown > # See: https://bugs.python.org/issue45209 > resource_tracker.unregister(f"/{name}", "shared_memory") > 4196c4278 < # --- > # Test to verify that `Finalize` works. 4207a4290 > gc.collect() # For PyPy or other GCs. 4218a4302 > gc.collect() # For PyPy or other GCs. 4224a4309 > gc.collect() # For PyPy or other GCs. # ---------------------------------------------------------------------- $ diff Python-3.10.2/Lib/test/_test_multiprocessing.py Python-3.10.4/Lib/test/_test_multiprocessing.py 75a76,81 > if support.check_sanitizer(address=True): > # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to > # work around a libasan race condition: dead lock in pthread_create(). 
> raise unittest.SkipTest("libasan has a pthread_create() dead lock") > > # ---------------------------------------------------------------------- diff Python-3.10.4/Lib/multiprocessing/managers.py Python-3.10.5/Lib/multiprocessing/managers.py 680c680 < process.join(timeout=0.1) --- > process.join(timeout=1.0) diff Python-3.10.4/Lib/multiprocessing/queues.py Python-3.10.5/Lib/multiprocessing/queues.py 142,148c142,145 < try: < self._reader.close() < finally: < close = self._close < if close: < self._close = None < close() --- > close = self._close > if close: > self._close = None > close() 172,173c169,171 < self._wlock, self._writer.close, self._ignore_epipe, < self._on_queue_feeder_error, self._sem), --- > self._wlock, self._reader.close, self._writer.close, > self._ignore_epipe, self._on_queue_feeder_error, > self._sem), 214,215c212,213 < def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe, < onerror, queue_sem): --- > def _feed(buffer, notempty, send_bytes, writelock, reader_close, > writer_close, ignore_epipe, onerror, queue_sem): 241c239,240 < close() --- > reader_close() > writer_close() diff Python-3.10.4/Lib/multiprocessing/util.py Python-3.10.5/Lib/multiprocessing/util.py 123c123 < raise TypeError('address type of {address!r} unrecognized') --- > raise TypeError(f'address type of {address!r} unrecognized') # ---------------------------------------------------------------------- diff Python-3.10.5/Modules/_multiprocessing/semaphore.c Python-3.10.6/Modules/_multiprocessing/semaphore.c 455,457c455 < SemLockObject *self; < < self = PyObject_New(SemLockObject, type); --- > SemLockObject *self = (SemLockObject *)type->tp_alloc(type, 0); 574c572 < PyObject_Free(self); --- > Py_TYPE(self)->tp_free((PyObject*)self); diff Python-3.10.5/Lib/multiprocessing/context.py Python-3.10.6/Lib/multiprocessing/context.py 225a226,229 > @staticmethod > def _after_fork(): > return _default_context.get_context().Process._after_fork() > 285a290,294 > @staticmethod > def _after_fork(): > # process is spawned, nothing to do > pass > 328a338,342 > @staticmethod > def _after_fork(): > # process is spawned, nothing to do > pass > diff Python-3.10.5/Lib/multiprocessing/pool.py Python-3.10.6/Lib/multiprocessing/pool.py 205a206,208 > if maxtasksperchild is not None: > if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: > raise ValueError("maxtasksperchild must be a positive int or None") diff Python-3.10.5/Lib/multiprocessing/process.py Python-3.10.6/Lib/multiprocessing/process.py 307,308c307 < util._finalizer_registry.clear() < util._run_after_forkers() --- > self._after_fork() 338a338,344 > @staticmethod > def _after_fork(): > from . import util > util._finalizer_registry.clear() > util._run_after_forkers() > > diff Python-3.10.5/Lib/multiprocessing/shared_memory.py Python-3.10.6/Lib/multiprocessing/shared_memory.py 25a26 > from . 
import resource_tracker 119,120c120 < from .resource_tracker import register < register(self._name, "shared_memory") --- > resource_tracker.register(self._name, "shared_memory") 240d239 < from .resource_tracker import unregister 242c241 < unregister(self._name, "shared_memory") --- > resource_tracker.unregister(self._name, "shared_memory") $ diff Python-3.10.5/Lib/test/_test_multiprocessing.py Python-3.10.6/Lib/test/_test_multiprocessing.py 7a8 > import textwrap 2806a2808,2812 > def test_pool_maxtasksperchild_invalid(self): > for value in [0, -1, 0.5, "12"]: > with self.assertRaises(ValueError): > multiprocessing.Pool(3, maxtasksperchild=value) > 3907c3913 < names = ['test01_fn', 'test02_fn'] --- > names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')] 5701a5708,5736 > class TestNamedResource(unittest.TestCase): > def test_global_named_resource_spawn(self): > # > # gh-90549: Check that global named resources in main module > # will not leak by a subprocess, in spawn context. > # > testfn = os_helper.TESTFN > self.addCleanup(os_helper.unlink, testfn) > with open(testfn, 'w', encoding='utf-8') as f: > f.write(textwrap.dedent('''\ > import multiprocessing as mp > > ctx = mp.get_context('spawn') > > global_resource = ctx.Semaphore() > > def submain(): pass > > if __name__ == '__main__': > p = ctx.Process(target=submain) > p.start() > p.join() > ''')) > rc, out, err = test.support.script_helper.assert_python_ok(testfn) > # on error, err = 'UserWarning: resource_tracker: There appear to > # be 1 leaked semaphore objects to clean up at shutdown' > self.assertEqual(err, b'') > > 5931a5967,5978 > > > @unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available') > @unittest.skipIf(sys.platform != "linux", "Linux only") > class SemLockTests(unittest.TestCase): > > def test_semlock_subclass(self): > class SemLock(_multiprocessing.SemLock): > pass > name = f'test_semlock_subclass-{os.getpid()}' > s = SemLock(1, 0, 10, name, 0) > _multiprocessing.sem_unlink(name) # ---------------------------------------------------------------------- diff Python-3.10.6/Lib/multiprocessing/resource_tracker.py Python-3.10.8/Lib/multiprocessing/resource_tracker.py 164c164 < if len(name) > 512: --- > if len(msg) > 512: 167c167 < raise ValueError('name too long') --- > raise ValueError('msg too long') diff Python-3.10.6/Lib/test/_test_multiprocessing.py Python-3.10.8/Lib/test/_test_multiprocessing.py 5379a5380,5387 > def test_too_long_name_resource(self): > # gh-96819: Resource names that will make the length of a write to a pipe > # greater than PIPE_BUF are not allowed > rtype = "shared_memory" > too_long_name_resource = "a" * (512 - len(rtype)) > with self.assertRaises(ValueError): > resource_tracker.register(too_long_name_resource, rtype) > # ---------------------------------------------------------------------- diff Python-3.10.8/Lib/multiprocessing/connection.py Python-3.10.9/Lib/multiprocessing/connection.py 76,80d75 < # Prefer abstract sockets if possible to avoid problems with the address < # size. When coding portable applications, some implementations have < # sun_path as short as 92 bytes in the sockaddr_un struct. 
< if util.abstract_sockets_supported: < return f"\0listener-{os.getpid()}-{next(_mmap_counter)}" Common subdirectories: Python-3.10.8/Lib/multiprocessing/dummy and Python-3.10.9/Lib/multiprocessing/dummy diff Python-3.10.8/Lib/multiprocessing/shared_memory.py Python-3.10.9/Lib/multiprocessing/shared_memory.py 176c176,179 < size = _winapi.VirtualQuerySize(p_buf) --- > try: > size = _winapi.VirtualQuerySize(p_buf) > finally: > _winapi.UnmapViewOfFile(p_buf) diff Python-3.10.8/Lib/test/_test_multiprocessing.py Python-3.10.9/Lib/test/_test_multiprocessing.py 5985c5985 < s = SemLock(1, 0, 10, name, 0) --- > s = SemLock(1, 0, 10, name, False) uqfoundation-multiprocess-b3457a5/py3.10/_multiprocess/000077500000000000000000000000001455552142400231425ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/_multiprocess/__init__.py000066400000000000000000000005011455552142400252470ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE from _multiprocessing import * uqfoundation-multiprocess-b3457a5/py3.10/doc/000077500000000000000000000000001455552142400210175ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/doc/CHANGES.html000066400000000000000000001133431455552142400227620ustar00rootroot00000000000000 Changelog for processing

Changelog for processing

Changes in 0.52

  • On versions 0.50 and 0.51, on Mac OS X, Lock.release() would fail with OSError(errno.ENOSYS, "[Errno 78] Function not implemented"). This appears to be because on Mac OS X sem_getvalue() has not been implemented.

    Now sem_getvalue() is no longer needed. Unfortunately, however, on Mac OSX BoundedSemaphore() will not raise ValueError if it exceeds its initial value.

  • Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs.

  • The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias.

Changes in 0.51

  • In 0.50 processing.Value() and processing.sharedctypes.Value() were related but had different signatures, which was rather confusing.

    Now processing.sharedctypes.Value() has been renamed processing.sharedctypes.RawValue() and processing.sharedctypes.Value() is the same as processing.Value().

  • In version 0.50 sendfd() and recvfd() apparently did not work on 64bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40.

    However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of sendfd() and recvfd() by setting macros['HAVE_FD_TRANSFER'] = 0 in setup.py.

  • Fixed an authentication error when using a "remote" manager created using BaseManager.from_address().

  • Fixed a couple of bugs which only affected Python 2.4.

Changes in 0.50

  • ctypes is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately.

  • LocalManager() has been removed.

  • Added processing.Value() and processing.Array() which are similar to LocalManager.SharedValue() and LocalManager.SharedArray().

  • In the sharedctypes module new_value() and new_array() have been renamed Value() and Array().

  • Process.stop(), Process.getStoppable() and Process.setStoppable() have been removed. Use Process.terminate() instead.

  • processing.Lock now matches threading.Lock behaviour more closely: now a thread can release a lock it does not own, and now when a thread tries acquiring a lock it already owns, a deadlock results instead of an exception.

  • On Windows when the main thread is blocking on a method of Lock, RLock, Semaphore, BoundedSemaphore, Condition it will no longer ignore Ctrl-C. (The same was already true on Unix.)

    This differs from the behaviour of the equivalent objects in threading which will completely ignore Ctrl-C.

  • The test sub-package has been replaced by lots of unit tests in a tests sub-package. Some of the old test files have been moved over to a new examples sub-package.

  • On Windows it is now possible for a non-console python program (i.e. one using pythonw.exe instead of python.exe) to use processing.

    Previously an exception was raised when subprocess.py tried to duplicate stdin, stdout, stderr.

  • Proxy objects should now be thread safe -- they now use thread local storage.

  • Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise RuntimeError with a message saying that the object should only be shared between processes using inheritance.

    Previously, this worked unreliably on Windows but would fail with an unexplained AssertionError on Unix.

  • The names of some of the macros used for compiling the extension have changed. See INSTALL.txt and setup.py.

  • A few changes which (hopefully) make compilation possible on Solaris.

  • Lots of refactoring of the code.

  • Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux).

Changes in 0.40

  • Removed SimpleQueue and PosixQueue types. Just use Queue instead.

  • Previously if you forgot to use the

    if __name__ == '__main__':
        freezeSupport()
        ...
    

    idiom on Windows then processes could be created recursively bringing the computer to its knees. Now RuntimeError will be raised instead.

  • Some refactoring of the code.

  • A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed.

Changes in 0.39

  • One can now create one-way pipes by doing reader, writer = Pipe(duplex=False).

  • Rewrote code for managing shared memory maps.

  • Added a sharedctypes module for creating ctypes objects allocated from shared memory. On Python 2.4 this requires the installation of ctypes.

    ctypes objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a LocalManager.

  • Rearranged documentation.

  • Previously the C extension caused a segfault on 64 bit machines with Python 2.5 because it used int instead of Py_ssize_t in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report.

  • A fix for Pool.terminate().

  • A fix for cleanup behaviour of Queue.

Changes in 0.38

  • Have revamped the queue types. Now the queue types are Queue, SimpleQueue and (on systems which support it) PosixQueue.

    Now Queue should behave just like Python's normal Queue.Queue class except that qsize(), task_done() and join() are not implemented. In particular, if no maximum size was specified when the queue was created then put() will always succeed without blocking.

    A SimpleQueue instance is really just a pipe protected by a couple of locks. It has get(), put() and empty() methods but does not support timeouts or non-blocking access.

    BufferedPipeQueue() and PipeQueue() remain as deprecated aliases of Queue() but BufferedPosixQueue() has been removed. (Not sure if we really need to keep PosixQueue()...)

  • Previously the Pool.shutdown() method was a little dodgy -- it could block indefinitely if map() or imap*() were used and did not try to terminate workers while they were doing a task.

    Now there are three new methods close(), terminate() and join() -- shutdown() is retained as a deprecated alias of terminate(). Thanks to Gerald John M. Manipon for feature request/suggested patch to shutdown().

  • Pool.imap() and Pool.imap_unordered() has gained a chunksize argument which allows the iterable to be submitted to the pool in chunks. Choosing chunksize appropriately makes Pool.imap() almost as fast as Pool.map() even for long iterables and cheap functions.

  • Previously on Windows, when the cleanup code for a LocalManager attempted to unlink the name of the file which backs the shared memory map, an exception was raised if a child process still existed which had a handle open for that mmap. This is likely to happen if a daemon process inherits a LocalManager instance.

    Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin.

  • types.MethodType is registered with copy_reg so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.)

    This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object.

  • On Windows reduction.fromfd() now returns true instances of _socket.socket, so there is no more need for the _processing.falsesocket type.

Changes in 0.37

  • Updated metadata and documentation because the project is now hosted at developer.berlios.de/projects/pyprocessing.
  • The Pool.join() method has been removed. Pool.shutdown() will now join the worker processes automatically.
  • A pool object no longer participates in a reference cycle so Pool.shutdown() should get called as soon as its reference count falls to zero.
  • On Windows if enableLogging() was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, now specifying a handler type in enableLogging() will cause any previous handlers used by the logger to be discarded.

Changes in 0.36

  • In recent versions on Unix the finalizers in a manager process were never given a chance to run before os._exit() was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.

  • The shutting down of managers has been cleaned up.

  • In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed.

  • In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed.

  • Previously the arguments to processing.Semaphore() and processing.BoundedSemaphore() did not have any defaults. The defaults should be 1 to match threading. Fixed.

  • It should now be possible for a Windows Service created by using pywin32 to spawn processes using the processing package.

    Note that pywin32 apparently has a bug meaning that Py_Finalize() is never called when the service exits so functions registered with atexit never get a chance to run. Therefore it is advisable to explicitly call sys.exitfunc() or atexit._run_exitfuncs() at the end of ServiceFramework.DoSvcRun(). Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report.

  • Added getLogger() and enableLogging() to support logging.

Changes in 0.35

  • By default processes are no longer stoppable using the stop() method: one must call setStoppable(True) before start() in order to use the stop() method. (Note that terminate() will work regardless of whether the process is marked as being "stoppable".)

    The reason for this is that on Windows getting stop() to work involves starting a new console for the child process and installing a signal handler for the SIGBREAK signal. This unfortunately means that Ctrl-Break cannot be used to kill all processes of the program.

  • Added setStoppable() and getStoppable() methods -- see above.

  • Added BufferedQueue/BufferedPipeQueue/BufferedPosixQueue. Putting an object on a buffered queue will always succeed without blocking (just like with Queue.Queue if no maximum size is specified). This makes them potentially safer than the normal queue types provided by processing which have finite capacity and may cause deadlocks if they fill.

    test/test_worker.py has been updated to use BufferedQueue for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock.

  • Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of sem_timedwait(). This means that Condition.wait() and Queue.get() should now work with timeouts on Mac OS X.

  • Added a callback argument to Pool.apply_async().

  • Added test/test_httpserverpool.py which runs a pool of http servers which share a single listening socket.

  • Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket.

  • Fixed bug in the iterator returned by Pool.imap().

  • Fixed bug in Condition.__repr__().

  • Fixed a handle/file descriptor leak when sockets or connections are unpickled.

Changes in 0.34

  • Although in version 0.33 the C extension would compile on Mac OS X, trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the ImportError exception was silently swallowed.

    This is now fixed by using the NO_SEM_TIMED macro. Unfortunately this means that some methods like Condition.wait() and Queue.get() will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing.

  • Added a terminate() method to process objects which is more forceful than stop().

  • Fixed bug in the cleanup function registered with atexit which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. Thanks to Dominique Wahli for report and testing.

  • Added test/test_workers.py which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another.

  • Added processing.Pool() which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the map() builtin. This is still experimental and undocumented --- see test/test_pool.py for example usage.

Changes in 0.33

  • Added a recvbytes_into() method for receiving byte data into objects with the writable buffer interface. Also renamed the _recv_string() and _send_string() methods of connection objects to recvbytes() and sendbytes().

  • Some optimizations for the transferring of large blocks of data using connection objects.

  • On Unix os.sysconf() is now used by default to determine whether to compile in support for posix semaphores or posix message queues.

    By using the NO_SEM_TIMED and NO_MQ_TIMED macros (see INSTALL.txt) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions sem_timedwait() or mq_timedreceive() and mq_timesend().

  • gettimeofday() is now used instead of clock_gettime() making compilation of the C extension (hopefully) possible on Mac OS X. No modification of setup.py should be necessary. Thanks to Michele Bertoldi for report and proposed patch.

  • cpuCount() function added which returns the number of CPUs in the system.

  • Bugfixes to PosixQueue class.

Changes in 0.32

  • Refactored and simplified _nonforking module -- info about sys.modules of parent process is no longer passed on to child process. Also pkgutil is no longer used.
  • Allocated space from an mmap used by LocalManager will now be recycled.
  • Better tests for LocalManager.
  • Fixed bug in managers.py concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report.
  • Added a freezeSupport() function. Calling this at the appropriate point in the main module is necessary when freezing a multiprocess program to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

Changes in 0.31

  • Fixed one line bug in localmanager.py which caused shared memory maps not to be resized properly.
  • Added tests for shared values/structs/arrays to test/test_processing.

Changes in 0.30

  • Process objects now support the complete API of thread objects.

    In particular isAlive(), isDaemon() and setDaemon() have been added and join() now supports the timeout parameter.

    There are also new methods stop(), getPid() and getExitCode().

  • Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores.

  • Added support for sharing simple objects between processes by using a shared memory map and the struct or array modules.

  • An activeChildren() function has been added to processing which returns a list of the child processes which are still alive.

  • A Pipe() function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent.

  • socket objects etc are now picklable and can be transferred between processes. (Requires compilation of the _processing extension.)

  • Subclasses of managers.BaseManager no longer automatically spawn a child process when an instance is created: the start() method must be called explicitly.

  • On Windows child processes are now spawned using subprocess.

  • On Windows the Python 2.5 version of pkgutil is now used for loading modules by the _nonforking module. On Python 2.4 this version of pkgutil (which uses the standard Python licence) is included in processing.compat.

  • The arguments to the functions in processing.connection have changed slightly.

  • Connection objects now have a poll() method which tests whether there is any data available for reading.

  • The test/py2exedemo folder shows how to get py2exe to create a Windows executable from a program using the processing package.

  • More tests.

  • Bugfixes.

  • Rearrangement of various stuff.

Changes in 0.21

  • By default a proxy is now only able to access those methods of its referent which have been explicitly exposed.
  • The connection sub-package now supports digest authentication.
  • Process objects are now given randomly generated 'inheritable' authentication keys.
  • A manager process will now only accept connections from processes using the same authentication key.
  • Previously get_module() from _nonforking.py was seriously messed up (though it generally worked). It is a lot saner now.
  • Python 2.4 or higher is now required.

Changes in 0.20

  • The doc folder contains HTML documentation.
  • test is now a subpackage. Running processing.test.main() will run test scripts using both processes and threads.
  • nonforking.py has been renamed _nonforking.py. manager.py has been renamed managers.py. connection.py has become a sub-package connection.
  • Listener and Client have been removed from processing, but still exist in processing.connection.
  • The package is now probably compatible with versions of Python earlier than 2.4.
  • set is no longer a type supported by the default manager type.
  • Many more changes.

Changes in 0.12

  • Fixed bug where the arguments to processing.Manager() were passed on to processing.manager.DefaultManager() in the wrong order.
  • processing.dummy is now a subpackage of processing instead of a module.
  • Rearranged package so that the test folder, README.txt and CHANGES.txt are copied when the package is installed.

Changes in 0.11

  • Fixed bug on windows when the full path of nonforking.py contains a space.
  • On unix there is no longer a need to make the arguments to the constructor of Process be picklable or for an instance of a subclass of Process to be picklable when you call the start method.
  • On unix proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to Process. (This will never be possible on windows.)
uqfoundation-multiprocess-b3457a5/py3.10/doc/COPYING.html000066400000000000000000000040211455552142400230120ustar00rootroot00000000000000

Copyright (c) 2006-2008, R Oudkerk

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

uqfoundation-multiprocess-b3457a5/py3.10/doc/INSTALL.html000066400000000000000000000063531455552142400230220ustar00rootroot00000000000000 Installation of processing

Installation of processing

Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the ctypes package (which comes automatically with Python 2.5).

Windows binary builds for Python 2.4 and Python 2.5 are available at

http://pyprocessing.berlios.de

or

http://pypi.python.org/pypi/processing

Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:

python setup.py install

It should not be necessary to do any editing of setup.py if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the macros dictionary or libraries list. The section to modify reads

else:
    macros = dict(
        HAVE_SEM_OPEN=1,
        HAVE_SEM_TIMEDWAIT=1,
        HAVE_FD_TRANSFER=1
        )
    libraries = ['rt']

More details can be found in the comments in setup.py.

Note that if you use HAVE_SEM_OPEN=0 then support for posix semaphores will not be compiled in, and then many of the functions in the processing namespace like Lock() or Queue() will not be available. However, one can still create a manager using manager = processing.Manager() and then do lock = manager.Lock() etc.
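
For example, a minimal sketch of this manager-based fallback (assuming the default manager provides Lock and Queue proxies, as described in the package reference) might look like:

import processing

manager = processing.Manager()   # starts a separate manager process
lock = manager.Lock()            # proxy for a Lock owned by the manager
queue = manager.Queue()          # proxy for a Queue owned by the manager

lock.acquire()
try:
    queue.put('hello')
finally:
    lock.release()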

Running tests

To run the test scripts using Python 2.5 do

python -m processing.tests

and on Python 2.4 do

python -c "from processing.tests import main; main()"

This will run a number of test scripts using both processes and threads.

uqfoundation-multiprocess-b3457a5/py3.10/doc/THANKS.html000066400000000000000000000017751455552142400227070ustar00rootroot00000000000000 Thanks

Thanks

Thanks to everyone who has offered bug reports, patches, suggestions:

Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.

Sorry if I have forgotten anyone.

uqfoundation-multiprocess-b3457a5/py3.10/doc/__init__.py000066400000000000000000000004001455552142400231220ustar00rootroot00000000000000import os import webbrowser def main(): ''' Show html documentation using webbrowser ''' index_html = os.path.join(os.path.dirname(__file__), 'index.html') webbrowser.open(index_html) if __name__ == '__main__': main() uqfoundation-multiprocess-b3457a5/py3.10/doc/connection-objects.html000066400000000000000000000152041455552142400254750ustar00rootroot00000000000000 Connection objects
Prev         Up         Next

Connection objects

Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.

Connection objects are usually created using processing.Pipe() -- see also Listener and Clients.

Connection objects have the following methods:

send(obj)

Send an object to the other end of the connection which should be read using recv().

The object must be picklable.

recv()
Return an object sent from the other end of the connection using send(). Raises EOFError if there is nothing left to receive and the other end was closed.
fileno()
Returns the file descriptor or handle used by the connection.
close()

Close the connection.

This is called automatically when the connection is garbage collected.

poll(timeout=0.0)

Return whether there is any data available to be read within timeout seconds.

If timeout is None then an infinite timeout is used.

Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C.

sendBytes(buffer)

Send byte data from an object supporting the buffer interface as a complete message.

Can be used to send strings or a view returned by buffer().

recvBytes()
Return a complete message of byte data sent from the other end of the connection as a string. Raises EOFError if there is nothing left to receive and the other end was closed.
recvBytesInto(buffer, offset=0)

Read into buffer at position offset a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises EOFError if there is nothing left to receive and the other end was closed.

buffer must be an object satisfying the writable buffer interface and offset must be non-negative and less than the length of buffer (in bytes).

If the buffer is too short then a BufferTooShort exception is raised and the complete message is available as e.args[0] where e is the exception instance.

For example:

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.sendBytes('thank you')
>>> a.recvBytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.sendBytes(arr1)
>>> count = b.recvBytesInto(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
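
The BufferTooShort behaviour described for recvBytesInto() can be handled explicitly. A minimal sketch, assuming the same pipe ends a and b from the example above and that BufferTooShort is importable from processing.connection:

>>> from processing.connection import BufferTooShort
>>> a.sendBytes('a message that is too long')
>>> small = array.array('c', ' ' * 4)
>>> try:
...     b.recvBytesInto(small)
... except BufferTooShort, e:
...     complete_message = e.args[0]
...
>>> complete_message
'a message that is too long'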

Warning

The recv() method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message.

Therefore, unless the connection object was produced using Pipe() you should only use the recv() and send() methods after performing some sort of authentication. See Authentication keys.

Warning

If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie.

uqfoundation-multiprocess-b3457a5/py3.10/doc/connection-objects.txt000066400000000000000000000072761455552142400253620ustar00rootroot00000000000000.. include:: header.txt ==================== Connection objects ==================== Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects usually created using `processing.Pipe()` -- see also `Listener and Clients `_. Connection objects have the following methods: `send(obj)` Send an object to the other end of the connection which should be read using `recv()`. The object must be picklable. `recv()` Return an object sent from the other end of the connection using `send()`. Raises `EOFError` if there is nothing left to receive and the other end was closed. `fileno()` Returns the file descriptor or handle used by the connection. `close()` Close the connection. This is called automatically when the connection is garbage collected. `poll(timeout=0.0)` Return whether there is any data available to be read within `timeout` seconds. If `timeout` is `None` then an infinite timeout is used. Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C. `sendBytes(buffer)` Send byte data from an object supporting the buffer interface as a complete message. Can be used to send strings or a view returned by `buffer()`. `recvBytes()` Return a complete message of byte data sent from the other end of the connection as a string. Raises `EOFError` if there is nothing left to receive and the other end was closed. `recvBytesInto(buffer, offset=0)` Read into `buffer` at position `offset` a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises `EOFError` if there is nothing left to receive and the other end was closed. `buffer` must be an object satisfying the writable buffer interface and `offset` must be non-negative and less than the length of `buffer` (in bytes). If the buffer is too short then a `BufferTooShort` exception is raised and the complete message is available as `e.args[0]` where `e` is the exception instance. For example: >>> from processing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.sendBytes('thank you') >>> a.recvBytes() 'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.sendBytes(arr1) >>> count = b.recvBytesInto(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The `recv()` method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using `Pipe()` you should only use the `recv()` and `send()` methods after performing some sort of authentication. See `Authentication keys `_. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie. .. _Prev: queue-objects.html .. _Up: processing-ref.html .. _Next: manager-objects.html uqfoundation-multiprocess-b3457a5/py3.10/doc/connection-ref.html000066400000000000000000000357371455552142400246350ustar00rootroot00000000000000 Listeners and Clients
Prev         Up         Next

Listeners and Clients

Usually message passing between processes is done using queues or by using connection objects returned by Pipe().

However, the processing.connection module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for digest authentication using the hmac module from the standard library.

Classes and functions

The module defines the following functions:

Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)
Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections.
Client(address, family=None, authenticate=False, authkey=None)

Attempts to set up a connection to the listener which is using address address, returning a connection object.

The type of the connection is determined by family argument, but this can generally be omitted since it can usually be inferred from the format of address.

If authentication or authkey is a string then digest authentication is used. The key used for authentication will be either authkey or currentProcess.getAuthKey() if authkey is None. If authentication fails then AuthenticationError is raised. See Authentication keys.

The module exports two exception types:

exception AuthenticationError
Exception raised when there is an authentication error.
exception BufferTooShort

Exception raise by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Listener objects

Instances of Listener have the following methods:

__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)
address
The address to be used by the bound socket or named pipe of the listener object.
family

The type of the socket (or named pipe) to use.

This can be one of the strings 'AF_INET' (for a TCP socket), 'AF_UNIX' (for a Unix domain socket) or 'AF_PIPE' (for a Windows named pipe). Of these only the first is guaranteed to be available.

If family is None than the family is inferred from the format of address. If address is also None then a default is chosen. This default is the family which is assumed to be the fastest available. See Address formats.

Note that if family is 'AF_UNIX' then the associated file will only be readable/writable by the user running the current process -- use os.chmod() if you need to let other users access the socket.

backlog
If the listener object uses a socket then backlog is passed to the listen() method of the socket once it has been bound.
authenticate
If authenticate is true or authkey is not None then digest authentication is used.
authkey

If authkey is a string then it will be used as the authentication key; otherwise it must be None.

If authkey is None and authenticate is true then currentProcess.getAuthKey() is used as the authentication key.

If authkey is None and authenticate is false then no authentication is done.

If authentication fails then AuthenticationError is raised. See Authentication keys.

accept()

Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then AuthenticationError is raised.

Returns a connection object (see Connection objects).

close()

Close the bound socket or named pipe of the listener object.

This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.

Listener objects have the following read-only properties:

address
The address which is being used by the listener object.
last_accepted

The address from which the last accepted connection came.

If this is unavailable then None is returned.

Address formats

  • An 'AF_INET' address is a tuple of the form (hostname, port) where hostname is a string and port is an integer

  • An 'AF_UNIX' address is a string representing a filename on the filesystem.

  • An 'AF_PIPE' address is a string of the form r'\\.\pipe\PipeName'.

    To use Client to connect to a named pipe on a remote computer called ServerName one should use an address of the form r'\\ServerName\pipe\PipeName' instead.

Note that any string beginning with two backslashes is assumed by default to be an 'AF_PIPE' address rather than an 'AF_UNIX' address.
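
As a rough illustration (the specific addresses here are hypothetical), the family is normally inferred from the address passed to Listener or Client:

from processing.connection import Listener

# a (hostname, port) tuple gives an 'AF_INET' listener
inet_listener = Listener(('localhost', 6000))
inet_listener.close()

# a filename such as '/tmp/my-socket' would give an 'AF_UNIX' listener on Unix,
# and a string like r'\\.\pipe\MyPipe' an 'AF_PIPE' listener on Windows.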

Authentication keys

When one uses the recv() method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore Listener and Client use the hmac module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does not involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the return value of currentProcess().getAuthKey() is used (see Process objects). This value will automatically be inherited by any Process object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves.

Suitable authentication keys can also be generated by using os.urandom().
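
For instance, a hypothetical sketch of using a randomly generated key (the address and key length here are arbitrary):

import os
from processing.connection import Listener

authkey = os.urandom(32)
listener = Listener(('localhost', 6000), authkey=authkey)
# the client process must be given the same key, e.g.
# Client(('localhost', 6000), authkey=authkey); a mismatched
# key causes AuthenticationError to be raised.
listener.close()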

Example

The following server code creates a listener which uses 'secret password' as an authentication key. It then waits for a connection and sends some data to the client:

from processing.connection import Listener
from array import array

address = ('localhost', 6000)     # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')

conn = listener.accept()
print 'connection accepted from', listener.last_accepted

conn.send([2.25, None, 'junk', float])

conn.sendBytes('hello')

conn.sendBytes(array('i', [42, 1729]))

conn.close()
listener.close()

The following code connects to the server and receives some data from the server:

from processing.connection import Client
from array import array

address = ('localhost', 6000)
conn = Client(address, authkey='secret password')

print conn.recv()                 # => [2.25, None, 'junk', float]

print conn.recvBytes()            # => 'hello'

arr = array('i', [0, 0, 0, 0, 0])
print conn.recvBytesInto(arr)    # => 8
print arr                         # => array('i', [42, 1729, 0, 0, 0])

conn.close()
uqfoundation-multiprocess-b3457a5/py3.10/doc/connection-ref.txt000066400000000000000000000210001455552142400244620ustar00rootroot00000000000000.. include:: header.txt ======================= Listeners and Clients ======================= Usually message passing between processes is done using queues or by using connection objects returned by `Pipe()`. However, the `processing.connection` module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for *digest authentication* using the `hmac` module from the standard library. Classes and functions ===================== The module defines the following functions: `Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)` Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections. `Client(address, family=None, authenticate=False, authkey=None)` Attempts to set up a connection to the listener which is using address `address`, returning a `connection object `_. The type of the connection is determined by `family` argument, but this can generally be omitted since it can usually be inferred from the format of `address`. If `authentication` or `authkey` is a string then digest authentication is used. The key used for authentication will be either `authkey` or `currentProcess.getAuthKey()` if `authkey` is `None`. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. .. `deliverChallenge(connection, authkey)` Sends a randomly generated message to the other end of the connection and waits for a reply. If the reply matches the digest of the message using `authkey` as the key then a welcome message is sent to the other end of the connection. Otherwise `AuthenticationError` is raised. `answerChallenge(connection, authkey)` Receives a message, calculates the digest of the message using `authkey` as the key, and then sends the digest back. If a welcome message is not received then `AuthenticationError` is raised. The module exports two exception types: **exception** `AuthenticationError` Exception raised when there is an authentication error. **exception** `BufferTooShort` Exception raise by the `recvBytesInto()` method of a connection object when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Listener objects ================ Instances of `Listener` have the following methods: `__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)` `address` The address to be used by the bound socket or named pipe of the listener object. `family` The type of the socket (or named pipe) to use. This can be one of the strings `'AF_INET'` (for a TCP socket), `'AF_UNIX'` (for a Unix domain socket) or `'AF_PIPE'` (for a Windows named pipe). Of these only the first is guaranteed to be available. If `family` is `None` than the family is inferred from the format of `address`. If `address` is also `None` then a default is chosen. This default is the family which is assumed to be the fastest available. See `Address formats`_. Note that if `family` is `'AF_UNIX'` then the associated file will have only be readable/writable by the user running the current process -- use `os.chmod()` is you need to let other users access the socket. `backlog` If the listener object uses a socket then `backlog` is passed to the `listen()` method of the socket once it has been bound. 
`authenticate` If `authenticate` is true or `authkey` is not `None` then digest authentication is used. `authkey` If `authkey` is a string then it will be used as the authentication key; otherwise it must be `None`. If `authkey` is `None` and `authenticate` is true then `currentProcess.getAuthKey()` is used as the authentication key. If `authkey` is `None` and `authentication` is false then no authentication is done. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. `accept()` Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then `AuthenticationError` is raised. Returns a `connection object ` object. `close()` Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: `address` The address which is being used by the listener object. `last_accepted` The address from which the last accepted connection came. If this is unavailable then `None` is returned. Address formats =============== * An `'AF_INET'` address is a tuple of the form `(hostname, port)` where `hostname` is a string and `port` is an integer * An `'AF_UNIX'` address is a string representing a filename on the filesystem. * An `'AF_PIPE'` address is a string of the form `r'\\\\.\\pipe\\PipeName'`. To use `Client` to connect to a named pipe on a remote computer called `ServerName` one should use an address of the form `r'\\\\ServerName\\pipe\\PipeName'` instead. Note that any string beginning with two backslashes is assumed by default to be an `'AF_PIPE'` address rather than an `'AF_UNIX'` address. Authentication keys =================== When one uses the `recv()` method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore `Listener` and `Client` use the `hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does *not* involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of `currentProcess().getAuthKey()` is used (see `Process objects `_). This value will automatically inherited by any `Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between the themselves. Suitable authentication keys can also be generated by using `os.urandom()`. Example ======= The following server code creates a listener which uses `'secret password'` as an authentication key. 
It then waits for a connection and sends some data to the client:: from processing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey='secret password') conn = listener.accept() print 'connection accepted from', listener.last_accepted conn.send([2.25, None, 'junk', float]) conn.sendBytes('hello') conn.sendBytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from processing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey='secret password') print conn.recv() # => [2.25, None, 'junk', float] print conn.recvBytes() # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print conn.recvBytesInto(arr) # => 8 print arr # => array('i', [42, 1729, 0, 0, 0]) conn.close() .. _Prev: sharedctypes.html .. _Up: processing-ref.html .. _Next: programming-guidelines.html uqfoundation-multiprocess-b3457a5/py3.10/doc/header.txt000066400000000000000000000003401455552142400230050ustar00rootroot00000000000000.. default-role:: literal .. header:: Prev_ |spaces| Up_ |spaces| Next_ .. footer:: Prev_ |spaces| Up_ |spaces| Next_ .. |nbsp| unicode:: U+000A0 .. |spaces| replace:: |nbsp| |nbsp| |nbsp| |nbsp| uqfoundation-multiprocess-b3457a5/py3.10/doc/html4css1.css000066400000000000000000000126361455552142400233630ustar00rootroot00000000000000/* :Author: David Goodger :Contact: goodger@users.sourceforge.net :Date: $Date: 2008/01/29 22:14:02 $ :Revision: $Revision: 1.1.1.1 $ :Copyright: This stylesheet has been placed in the public domain. Default cascading style sheet for the HTML output of Docutils. See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to customize this style sheet. */ /* used to remove borders from tables and images */ .borderless, table.borderless td, table.borderless th { border: 0 } table.borderless td, table.borderless th { /* Override padding for "table.docutils td" with "! important". The right padding separates the table cells. */ padding: 0 0.5em 0 0 ! important } .first { /* Override more specific margin styles with "! important". */ margin-top: 0 ! important } .last, .with-subtitle { margin-bottom: 0 ! important } .hidden { display: none } a.toc-backref { text-decoration: none ; color: black } blockquote.epigraph { margin: 2em 5em ; } dl.docutils dd { margin-bottom: 0.5em } /* Uncomment (and remove this text!) to get bold-faced definition list terms dl.docutils dt { font-weight: bold } */ div.abstract { margin: 2em 5em } div.abstract p.topic-title { font-weight: bold ; text-align: center } div.admonition, div.attention, div.caution, div.danger, div.error, div.hint, div.important, div.note, div.tip, div.warning { margin: 2em ; border: medium outset ; padding: 1em } div.admonition p.admonition-title, div.hint p.admonition-title, div.important p.admonition-title, div.note p.admonition-title, div.tip p.admonition-title { font-weight: bold ; font-family: sans-serif } div.attention p.admonition-title, div.caution p.admonition-title, div.danger p.admonition-title, div.error p.admonition-title, div.warning p.admonition-title { color: red ; font-weight: bold ; font-family: sans-serif } /* Uncomment (and remove this text!) to get reduced vertical space in compound paragraphs. 
div.compound .compound-first, div.compound .compound-middle { margin-bottom: 0.5em } div.compound .compound-last, div.compound .compound-middle { margin-top: 0.5em } */ div.dedication { margin: 2em 5em ; text-align: center ; font-style: italic } div.dedication p.topic-title { font-weight: bold ; font-style: normal } div.figure { margin-left: 2em ; margin-right: 2em } div.footer, div.header { clear: both; font-size: smaller } div.line-block { display: block ; margin-top: 1em ; margin-bottom: 1em } div.line-block div.line-block { margin-top: 0 ; margin-bottom: 0 ; margin-left: 1.5em } div.sidebar { margin-left: 1em ; border: medium outset ; padding: 1em ; background-color: #ffffee ; width: 40% ; float: right ; clear: right } div.sidebar p.rubric { font-family: sans-serif ; font-size: medium } div.system-messages { margin: 5em } div.system-messages h1 { color: red } div.system-message { border: medium outset ; padding: 1em } div.system-message p.system-message-title { color: red ; font-weight: bold } div.topic { margin: 2em } h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { margin-top: 0.4em } h1.title { text-align: center } h2.subtitle { text-align: center } hr.docutils { width: 75% } img.align-left { clear: left } img.align-right { clear: right } ol.simple, ul.simple { margin-bottom: 1em } ol.arabic { list-style: decimal } ol.loweralpha { list-style: lower-alpha } ol.upperalpha { list-style: upper-alpha } ol.lowerroman { list-style: lower-roman } ol.upperroman { list-style: upper-roman } p.attribution { text-align: right ; margin-left: 50% } p.caption { font-style: italic } p.credits { font-style: italic ; font-size: smaller } p.label { white-space: nowrap } p.rubric { font-weight: bold ; font-size: larger ; color: maroon ; text-align: center } p.sidebar-title { font-family: sans-serif ; font-weight: bold ; font-size: larger } p.sidebar-subtitle { font-family: sans-serif ; font-weight: bold } p.topic-title { font-weight: bold } pre.address { margin-bottom: 0 ; margin-top: 0 ; font-family: serif ; font-size: 100% } pre.literal-block, pre.doctest-block { margin-left: 2em ; margin-right: 2em ; background-color: #eeeeee } span.classifier { font-family: sans-serif ; font-style: oblique } span.classifier-delimiter { font-family: sans-serif ; font-weight: bold } span.interpreted { font-family: sans-serif } span.option { white-space: nowrap } span.pre { white-space: pre } span.problematic { color: red } span.section-subtitle { /* font-size relative to parent (h1..h6 element) */ font-size: 80% } table.citation { border-left: solid 1px gray; margin-left: 1px } table.docinfo { margin: 2em 4em } table.docutils { margin-top: 0.5em ; margin-bottom: 0.5em } table.footnote { border-left: solid 1px black; margin-left: 1px } table.docutils td, table.docutils th, table.docinfo td, table.docinfo th { padding-left: 0.5em ; padding-right: 0.5em ; vertical-align: top } table.docutils th.field-name, table.docinfo th.docinfo-name { font-weight: bold ; text-align: left ; white-space: nowrap ; padding-left: 0 } h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { font-size: 100% } /* tt.docutils { background-color: #eeeeee } */ ul.auto-toc { list-style-type: none } uqfoundation-multiprocess-b3457a5/py3.10/doc/index.html000066400000000000000000000064761455552142400230310ustar00rootroot00000000000000 Documentation for processing-0.52
Prev         Up         Next
uqfoundation-multiprocess-b3457a5/py3.10/doc/index.txt000066400000000000000000000021751455552142400226740ustar00rootroot00000000000000.. include:: header.txt .. include:: version.txt ======================================== Documentation for processing-|version| ======================================== :Author: R Oudkerk :Contact: roudkerk at users.berlios.de :Url: http://developer.berlios.de/projects/pyprocessing :Licence: BSD Licence Contents ======== * `Introduction `_ * `Package reference `_ + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes objects `_ + `Listeners and Clients `_ * `Programming guidelines `_ * `Tests and examples `_ See also ======== * `Installation instructions `_ * `Changelog `_ * `Acknowledgments `_ * `Licence `_ .. _Next: intro.html .. _Up: index.html .. _Prev: index.html uqfoundation-multiprocess-b3457a5/py3.10/doc/intro.html000066400000000000000000000427461455552142400230550ustar00rootroot00000000000000 Introduction
Prev         Up         Next

Introduction

Threads, processes and the GIL

To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads.

Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient.

On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other.

CPython has a Global Interpreter Lock (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C.

One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead.

Python gives little direct support for writing programs using multiple processes. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs.

Forking and spawning

There are two ways of creating a new process in Python:

  • The current process can fork a new child process by using the os.fork() function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits copies of all variables that the parent process had.

    However, os.fork() is not available on every platform: in particular Windows does not support it.

  • Alternatively, the current process can spawn a completely new Python interpreter by using the subprocess module or one of the os.spawn*() functions.

    Getting this new interpreter into a fit state to perform the task set for it by its parent process is, however, a bit of a challenge.

The processing package uses os.fork() if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process.
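
As a rough illustration of the fork model described above (this is plain standard-library code, not part of the processing API, and assumes a Unix platform where os.fork() is available), a forked child starts with a copy of the parent's data:

import os

data = ['created', 'before', 'fork']      # the child receives a *copy* of this list

pid = os.fork()                           # Unix only: returns 0 in the child, the child's pid in the parent
if pid == 0:
    data.append('child only')             # changes the child's copy, not the parent's
    os._exit(0)                           # leave the child without running cleanup handlers
else:
    os.waitpid(pid, 0)                    # wait for the child to finish
    print data                            # prints "['created', 'before', 'fork']"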

The Process class

In the processing package processes are spawned by creating a Process object and then calling its start() method. processing.Process follows the API of threading.Thread. A trivial example of a multiprocess program is

from processing import Process

def f(name):
    print 'hello', name

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

Here the function f is run in a child process.

For an explanation of why (on Windows) the if __name__ == '__main__' part is necessary see Programming guidelines.

Exchanging objects between processes

processing supports two types of communication channel between processes:

Queues:

The function Queue() returns a near clone of Queue.Queue -- see the Python standard documentation. For example

from processing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()

Queues are thread and process safe. See Queues.

Pipes:

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example

from processing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print parent_conn.recv()   # prints "[42, None, 'hello']"
    p.join()

The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See Pipes.

Synchronization between processes

processing contains equivalents of all the synchronization primitives from threading. For instance one can use a lock to ensure that only one process prints to standard output at a time:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()

    for num in range(10):
        Process(target=f, args=(lock, num)).start()

Without using the lock output from the different processes is liable to get all mixed up.

Sharing state between processes

As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes.

However, if you really do need to use some shared data then processing provides a couple of ways of doing so.

Shared memory:

Data can be stored in a shared memory map using Value or Array. For example the following code

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

will print

3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

The 'd' and 'i' arguments used when creating num and arr are typecodes of the kind used by the array module: 'd' indicates a double precision float and 'i' indicates a signed integer. These shared objects will be process and thread safe.

For more flexibility in using shared memory one can use the processing.sharedctypes module which supports the creation of arbitrary ctypes objects allocated from shared memory.
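
For instance, a minimal sketch (assuming, as described on the shared ctypes page, that processing.sharedctypes provides Value and Array functions accepting ctypes types) might look like:

from processing import Process
from processing.sharedctypes import Value, Array
from ctypes import c_double, c_int

def modify(n, a):
    n.value = 2.5
    for i in range(len(a)):
        a[i] = a[i] * 2

if __name__ == '__main__':
    n = Value(c_double, 0.0)              # synchronized shared double
    a = Array(c_int, range(5))            # synchronized shared array of ints

    p = Process(target=modify, args=(n, a))
    p.start()
    p.join()

    print n.value                         # prints "2.5"
    print a[:]                            # prints "[0, 2, 4, 6, 8]"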

Server process:

A manager object returned by Manager() controls a server process which holds python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue, Value and Array. For example:

from processing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    print d
    print l

will print

{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Creating managers which support other types is not hard --- see Customized managers.

Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See Server process managers.

Using a pool of workers

The Pool() function returns an object representing a pool of worker processes. It has methods which allow tasks to be offloaded to the worker processes in a few different ways.

For example:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes
    result = pool.applyAsync(f, [10])     # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow
    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

See Process pools.

Speed

The following benchmarks were performed on a single core Pentium 4, 2.5GHz laptop running Windows XP and Ubuntu Linux 6.10 --- see benchmarks.py.

Number of 256 byte string objects passed between processes/threads per sec:

Connection type Windows Linux
Queue.Queue 49,000 17,000-50,000 [1]
processing.Queue 22,000 21,000
Queue managed by server 6,900 6,500
processing.Pipe 52,000 57,000
[1] For some reason the performance of Queue.Queue is very variable on Linux.

Number of acquires/releases of a lock per sec:

Lock type Windows Linux
threading.Lock 850,000 560,000
processing.Lock 420,000 510,000
Lock managed by server 10,000 8,400
threading.RLock 93,000 76,000
processing.RLock 420,000 500,000
RLock managed by server 8,800 7,400

Number of interleaved waits/notifies per sec on a condition variable by two processes:

Condition type Windows Linux
threading.Condition 27,000 31,000
processing.Condition 26,000 25,000
Condition managed by server 6,600 6,000

Number of integers retrieved from a sequence per sec:

Sequence type Windows Linux
list 6,400,000 5,100,000
unsynchronized shared array 3,900,000 3,100,000
synchronized shared array 200,000 220,000
list managed by server 20,000 17,000
uqfoundation-multiprocess-b3457a5/py3.10/doc/intro.txt000066400000000000000000000301551455552142400227170ustar00rootroot00000000000000.. include:: header.txt ============== Introduction ============== Threads, processes and the GIL ============================== To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads. Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient. On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other. CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C. One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead. Python gives little direct support for writing programs using multiple process. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs. Forking and spawning ==================== There are two ways of creating a new process in Python: * The current process can *fork* a new child process by using the `os.fork()` function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits *copies* of all variables that the parent process had. However, `os.fork()` is not available on every platform: in particular Windows does not support it. * Alternatively, the current process can spawn a completely new Python interpreter by using the `subprocess` module or one of the `os.spawn*()` functions. Getting this new interpreter in to a fit state to perform the task set for it by its parent process is, however, a bit of a challenge. The `processing` package uses `os.fork()` if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process. The Process class ================= In the `processing` package processes are spawned by creating a `Process` object and then calling its `start()` method. `processing.Process` follows the API of `threading.Thread`. A trivial example of a multiprocess program is :: from processing import Process def f(name): print 'hello', name if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() Here the function `f` is run in a child process. For an explanation of why (on Windows) the `if __name__ == '__main__'` part is necessary see `Programming guidelines `_. 
Exchanging objects between processes ==================================== `processing` supports two types of communication channel between processes: **Queues**: The function `Queue()` returns a near clone of `Queue.Queue` -- see the Python standard documentation. For example :: from processing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print q.get() # prints "[42, None, 'hello']" p.join() Queues are thread and process safe. See `Queues `_. **Pipes**: The `Pipe()` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example :: from processing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print parent_conn.recv() # prints "[42, None, 'hello']" p.join() The two connection objects returned by `Pipe()` represent the two ends of the pipe. Each connection object has `send()` and `recv()` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See `Pipes `_. Synchronization between processes ================================= `processing` contains equivalents of all the synchronization primitives from `threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from processing import Process, Lock def f(l, i): l.acquire() print 'hello world', i l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes =============================== As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then `processing` provides a couple of ways of doing so. **Shared memory**: Data can be stored in a shared memory map using `Value` or `Array`. For example the following code :: from processing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print num.value print arr[:] will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The `'d'` and `'i'` arguments used when creating `num` and `arr` are typecodes of the kind used by the `array` module: `'d'` indicates a double precision float and `'i'` inidicates a signed integer. These shared objects will be process and thread safe. For more flexibility in using shared memory one can use the `processing.sharedctypes` module which supports the creation of arbitrary `ctypes objects allocated from shared memory `_. **Server process**: A manager object returned by `Manager()` controls a server process which holds python objects and allows other processes to manipulate them using proxies. A manager returned by `Manager()` will support types `list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value` and `Array`. 
For example:: from processing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print d print l will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Creating managers which support other types is not hard --- see `Customized managers `_. Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See `Server process managers `_. Using a pool of workers ======================= The `Pool()` function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" See `Process pools `_. Speed ===== The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see `benchmarks.py <../examples/benchmarks.py>`_. *Number of 256 byte string objects passed between processes/threads per sec*: ================================== ========== ================== Connection type Windows Linux ================================== ========== ================== Queue.Queue 49,000 17,000-50,000 [1]_ processing.Queue 22,000 21,000 Queue managed by server 6,900 6,500 processing.Pipe 52,000 57,000 ================================== ========== ================== .. [1] For some reason the performance of `Queue.Queue` is very variable on Linux. *Number of acquires/releases of a lock per sec*: ============================== ========== ========== Lock type Windows Linux ============================== ========== ========== threading.Lock 850,000 560,000 processing.Lock 420,000 510,000 Lock managed by server 10,000 8,400 threading.RLock 93,000 76,000 processing.RLock 420,000 500,000 RLock managed by server 8,800 7,400 ============================== ========== ========== *Number of interleaved waits/notifies per sec on a condition variable by two processes*: ============================== ========== ========== Condition type Windows Linux ============================== ========== ========== threading.Condition 27,000 31,000 processing.Condition 26,000 25,000 Condition managed by server 6,600 6,000 ============================== ========== ========== *Number of integers retrieved from a sequence per sec*: ============================== ========== ========== Sequence type Windows Linux ============================== ========== ========== list 6,400,000 5,100,000 unsynchornized shared array 3,900,000 3,100,000 synchronized shared array 200,000 220,000 list managed by server 20,000 17,000 ============================== ========== ========== .. _Prev: index.html .. _Up: index.html .. _Next: processing-ref.html uqfoundation-multiprocess-b3457a5/py3.10/doc/manager-objects.html000066400000000000000000000440461455552142400247560ustar00rootroot00000000000000 Manager objects
Prev         Up         Next

Manager objects

A manager object controls a server process which manages shared objects. Other processes can access the shared objects by using proxies.

Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the processing.managers module.

BaseManager

BaseManager is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects.

The public methods of BaseManager are the following:

__init__(self, address=None, authkey=None)

Creates a manager object.

Once created one should call start() or serveForever() to ensure that the manager object refers to a started manager process.

The arguments to the constructor are as follows:

address

The address on which the manager process listens for new connections. If address is None then an arbitrary one is chosen.

See Listener objects.

authkey

The authentication key which will be used to check the validity of incoming connections to the server process.

If authkey is None then currentProcess().getAuthKey() is used. Otherwise authkey is used and it must be a string.

See Authentication keys.

start()
Spawn or fork a subprocess to start the manager.
serveForever()
Start the manager in the current process. See Using a remote manager.
fromAddress(address, authkey)
A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See Using a remote manager.
shutdown()

Stop the process used by the manager. This is only available if start() has been used to start the server process.

This can be called multiple times.

BaseManager instances also have one read-only property:

address
The address used by the manager.

The creation of managers which support arbitrary types is discussed below in Customized managers.

SyncManager

SyncManager is a subclass of BaseManager which can be used for the synchronization of processes. Objects of this type are returned by processing.Manager().

It also supports creation of shared lists and dictionaries. The instance methods defined by SyncManager are

BoundedSemaphore(value=1)
Creates a shared threading.BoundedSemaphore object and returns a proxy for it.
Condition(lock=None)

Creates a shared threading.Condition object and returns a proxy for it.

If lock is supplied then it should be a proxy for a threading.Lock or threading.RLock object.

Event()
Creates a shared threading.Event object and returns a proxy for it.
Lock()
Creates a shared threading.Lock object and returns a proxy for it.
Namespace()

Creates a shared Namespace object and returns a proxy for it.

See Namespace objects.

Queue(maxsize=0)
Creates a shared Queue.Queue object and returns a proxy for it.
RLock()
Creates a shared threading.RLock object and returns a proxy for it.
Semaphore(value=1)
Creates a shared threading.Semaphore object and returns a proxy for it.
Array(typecode, sequence)
Creates an array and returns a proxy for it. (format is ignored.)
Value(typecode, value)
Creates an object with a writable value attribute and returns a proxy for it.
dict(), dict(mapping), dict(sequence)
Creates a shared dict object and returns a proxy for it.
list(), list(sequence)
Creates a shared list object and returns a proxy for it.
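
As an illustrative sketch (assuming a manager started with processing.Manager() as above), the proxies returned by these methods are passed to child processes and used much like the objects they stand in for:

from processing import Process, Manager

def worker(q, l):
    l.acquire()
    try:
        q.put('task done')                # protected by the shared lock
    finally:
        l.release()

if __name__ == '__main__':
    manager = Manager()
    q = manager.Queue()                   # proxy for a shared Queue.Queue
    l = manager.Lock()                    # proxy for a shared threading.Lock

    p = Process(target=worker, args=(q, l))
    p.start()
    print q.get()                         # prints "task done"
    p.join()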

Namespace objects

A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes.

However, when using a proxy for a namespace object, an attribute beginning with '_' will be an attribute of the proxy and not an attribute of the referent:

>>> manager = processing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3    # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')

Customized managers

To create one's own manager one creates a subclass of BaseManager.

To create a method of the subclass which will create new shared objects one uses the following function:

CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)

Returns a function with signature func(self, *args, **kwds) which will create a shared object using the manager self and return a proxy for it.

The shared objects will be created by evaluating callable(*args, **kwds) in the manager process.

The arguments are:

callable
The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored.
proxytype

The type of proxy which will be used for the object returned by callable.

If proxytype is None then each time an object is returned by callable either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the exposed argument, see below.

exposed

Given a shared object returned by callable, the exposed argument is the list of those method names which should be exposed via BaseProxy._callMethod(). [1] [2]

If exposed is None and callable.__exposed__ exists then callable.__exposed__ is used instead.

If exposed is None and callable.__exposed__ does not exist then all methods of the shared object which do not start with '_' will be exposed.

An attempt to use BaseProxy._callMethod() with a method name which is not exposed will raise an exception.

typeid
If typeid is a string then it is used as an identifier for the callable. Otherwise, typeid must be None and a string prefixed by callable.__name__ is used as the identifier.
[1] A method here means any attribute which has a __call__ attribute.
[2]

The method names __repr__, __str__, and __cmp__ of a shared object are always exposed by the manager. However, instead of invoking the __repr__(), __str__(), __cmp__() instance methods (none of which are guaranteed to exist) they invoke the builtin functions repr(), str() and cmp().

Note that one should generally avoid exposing rich comparison methods like __eq__(), __ne__(), __le__(). To make the proxy type support comparison by value one can just expose __cmp__() instead (even if the referent does not have such a method).

Example

from processing.managers import BaseManager, CreatorMethod

class FooClass(object):
    def bar(self):
        print 'BAR'
    def baz(self):
        print 'BAZ'

class NewManager(BaseManager):
    Foo = CreatorMethod(FooClass)

if __name__ == '__main__':
    manager = NewManager()
    manager.start()
    foo = manager.Foo()
    foo.bar()               # prints 'BAR'
    foo.baz()               # prints 'BAZ'
    manager.shutdown()

See ex_newtype.py for more examples.

Using a remote manager

It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it).

Running the following commands creates a server for a shared queue which remote clients can use:

>>> from processing.managers import BaseManager, CreatorMethod
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy')
...
>>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none')
>>> m.serveForever()

One client can access the server as follows:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.put('hello')

Another client can also use it:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.get()
'hello'
uqfoundation-multiprocess-b3457a5/py3.10/doc/manager-objects.txt000066400000000000000000000235161455552142400246300ustar00rootroot00000000000000.. include:: header.txt ================= Manager objects ================= A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the `processing.managers` module. BaseManager =========== `BaseManager` is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects. The public methods of `BaseManager` are the following: `__init__(self, address=None, authkey=None)` Creates a manager object. Once created one should call `start()` or `serveForever()` to ensure that the manager object refers to a started manager process. The arguments to the constructor are as follows: `address` The address on which the manager process listens for new connections. If `address` is `None` then an arbitrary one is chosen. See `Listener objects `_. `authkey` The authentication key which will be used to check the validity of incoming connections to the server process. If `authkey` is `None` then `currentProcess().getAuthKey()`. Otherwise `authkey` is used and it must be a string. See `Authentication keys `_. `start()` Spawn or fork a subprocess to start the manager. `serveForever()` Start the manager in the current process. See `Using a remote manager`_. `fromAddress(address, authkey)` A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See `Using a remote manager`_. `shutdown()` Stop the process used by the manager. This is only available if `start()` has been used to start the server process. This can be called multiple times. `BaseManager` instances also have one read-only property: `address` The address used by the manager. The creation of managers which support arbitrary types is discussed below in `Customized managers`_. SyncManager =========== `SyncManager` is a subclass of `BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by `processing.Manager()`. It also supports creation of shared lists and dictionaries. The instance methods defined by `SyncManager` are `BoundedSemaphore(value=1)` Creates a shared `threading.BoundedSemaphore` object and returns a proxy for it. `Condition(lock=None)` Creates a shared `threading.Condition` object and returns a proxy for it. If `lock` is supplied then it should be a proxy for a `threading.Lock` or `threading.RLock` object. `Event()` Creates a shared `threading.Event` object and returns a proxy for it. `Lock()` Creates a shared `threading.Lock` object and returns a proxy for it. `Namespace()` Creates a shared `Namespace` object and returns a proxy for it. See `Namespace objects`_. `Queue(maxsize=0)` Creates a shared `Queue.Queue` object and returns a proxy for it. `RLock()` Creates a shared `threading.RLock` object and returns a proxy for it. `Semaphore(value=1)` Creates a shared `threading.Semaphore` object and returns a proxy for it. `Array(typecode, sequence)` Create an array and returns a proxy for it. (`format` is ignored.) `Value(typecode, value)` Create an object with a writable `value` attribute and returns a proxy for it. 
`dict()`, `dict(mapping)`, `dict(sequence)` Creates a shared `dict` object and returns a proxy for it. `list()`, `list(sequence)` Creates a shared `list` object and returns a proxy for it. Namespace objects ----------------- A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with `'_'` will be an attribute of the proxy and not an attribute of the referent:: >>> manager = processing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print Global Namespace(x=10, y='hello') Customized managers =================== To create one's own manager one creates a subclass of `BaseManager`. To create a method of the subclass which will create new shared objects one uses the following function: `CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)` Returns a function with signature `func(self, *args, **kwds)` which will create a shared object using the manager `self` and return a proxy for it. The shared objects will be created by evaluating `callable(*args, **kwds)` in the manager process. The arguments are: `callable` The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored. `proxytype` The type of proxy which will be used for object returned by `callable`. If `proxytype` is `None` then each time an object is returned by `callable` either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the `exposed` argument, see below. `exposed` Given a shared object returned by `callable`, the `exposed` argument is the list of those method names which should be exposed via |callmethod|_. [#]_ [#]_ If `exposed` is `None` and `callable.__exposed__` exists then `callable.__exposed__` is used instead. If `exposed` is `None` and `callable.__exposed__` does not exist then all methods of the shared object which do not start with `'_'` will be exposed. An attempt to use |callmethod| with a method name which is not exposed will raise an exception. `typeid` If `typeid` is a string then it is used as an identifier for the callable. Otherwise, `typeid` must be `None` and a string prefixed by `callable.__name__` is used as the identifier. .. |callmethod| replace:: ``BaseProxy._callMethod()`` .. _callmethod: proxy-objects.html#methods-of-baseproxy .. [#] A method here means any attribute which has a `__call__` attribute. .. [#] The method names `__repr__`, `__str__`, and `__cmp__` of a shared object are always exposed by the manager. However, instead of invoking the `__repr__()`, `__str__()`, `__cmp__()` instance methods (none of which are guaranteed to exist) they invoke the builtin functions `repr()`, `str()` and `cmp()`. Note that one should generally avoid exposing rich comparison methods like `__eq__()`, `__ne__()`, `__le__()`. To make the proxy type support comparison by value one can just expose `__cmp__()` instead (even if the referent does not have such a method). 
Example ------- :: from processing.managers import BaseManager, CreatorMethod class FooClass(object): def bar(self): print 'BAR' def baz(self): print 'BAZ' class NewManager(BaseManager): Foo = CreatorMethod(FooClass) if __name__ == '__main__': manager = NewManager() manager.start() foo = manager.Foo() foo.bar() # prints 'BAR' foo.baz() # prints 'BAZ' manager.shutdown() See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples. Using a remote manager ====================== It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a shared queue which remote clients can use:: >>> from processing.managers import BaseManager, CreatorMethod >>> import Queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy') ... >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none') >>> m.serveForever() One client can access the server as follows:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.put('hello') Another client can also use it:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.get() 'hello' .. _Prev: connection-objects.html .. _Up: processing-ref.html .. _Next: proxy-objects.html uqfoundation-multiprocess-b3457a5/py3.10/doc/pool-objects.html000066400000000000000000000265511455552142400243160ustar00rootroot00000000000000 Process Pools
Prev         Up         Next

Process Pools

The processing.pool module has one public class:

class Pool(processes=None, initializer=None, initargs=())

A class representing a pool of worker processes.

Tasks can be offloaded to the pool and the results dealt with when they become available.

Note that tasks can only be submitted (or retrieved) by the process which created the pool object.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

Pool objects

Pool has the following public methods:

__init__(processes=None)
The constructor creates and starts processes worker processes. If processes is None then cpuCount() is used to find a default, or 1 if cpuCount() raises NotImplementedError.
apply(func, args=(), kwds={})
Equivalent of the apply() builtin function. It blocks till the result is ready.
applyAsync(func, args=(), kwds={}, callback=None)

A variant of the apply() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

map(func, iterable, chunksize=None)

A parallel equivalent of the map() builtin function. It blocks till the result is ready.

This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.

mapAsync(func, iterable, chunksize=None, callback=None)

A variant of the map() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

imap(func, iterable, chunksize=1)

An equivalent of itertools.imap().

The chunksize argument is the same as the one used by the map() method. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1.

Also if chunksize is 1 then the next() method of the iterator returned by the imap() method has an optional timeout parameter: next(timeout) will raise processing.TimeoutError if the result cannot be returned within timeout seconds.

imapUnordered(func, iterable, chunksize=1)
The same as imap() except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".)
close()
Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
terminate()
Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected terminate() will be called immediately.
join()
Wait for the worker processes to exit. One must call close() or terminate() before using join().
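
The callback argument accepted by applyAsync() and mapAsync() above can be used to collect results as they arrive; the following is a minimal sketch (the square and collect names are only for illustration):

from processing import Pool

def square(x):
    return x*x

results = []

def collect(value):
    results.append(value)                 # runs in the parent's result-handling thread

if __name__ == '__main__':
    pool = Pool(processes=2)
    r1 = pool.applyAsync(square, (3,), callback=collect)
    r2 = pool.mapAsync(square, range(5), callback=collect)
    r1.wait()                             # the callback has been applied once a result is ready
    r2.wait()
    pool.close()
    pool.join()
    print results                         # e.g. [9, [0, 1, 4, 9, 16]]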

Asynchronous result objects

The result objects returned by applyAsync() and mapAsync() have the following public methods:

get(timeout=None)
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then processing.TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
wait(timeout=None)
Waits until the result is available or until timeout seconds pass.
ready()
Returns whether the call has completed.
successful()
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.

Examples

The following example demonstrates the use of a pool:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes

    result = pool.applyAsync(f, (10,))    # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow

    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

    it = pool.imap(f, range(10))
    print it.next()                       # prints "0"
    print it.next()                       # prints "1"
    print it.next(timeout=1)              # prints "4" unless your computer is *very* slow

    import time
    result = pool.applyAsync(time.sleep, (10,))
    print result.get(timeout=1)           # raises `TimeoutError`

See also ex_pool.py.

uqfoundation-multiprocess-b3457a5/py3.10/doc/pool-objects.txt000066400000000000000000000136411455552142400241650ustar00rootroot00000000000000.. include:: header.txt =============== Process Pools =============== The `processing.pool` module has one public class: **class** `Pool(processes=None, initializer=None, initargs=())` A class representing a pool of worker processes. Tasks can be offloaded to the pool and the results dealt with when they become available. Note that tasks can only be submitted (or retrieved) by the process which created the pool object. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. Pool objects ============ `Pool` has the following public methods: `__init__(processes=None)` The constructor creates and starts `processes` worker processes. If `processes` is `None` then `cpuCount()` is used to find a default or 1 if `cpuCount()` raises `NotImplemented`. `apply(func, args=(), kwds={})` Equivalent of the `apply()` builtin function. It blocks till the result is ready. `applyAsync(func, args=(), kwds={}, callback=None)` A variant of the `apply()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `map(func, iterable, chunksize=None)` A parallel equivalent of the `map()` builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting `chunksize` to a positive integer. `mapAsync(func, iterable, chunksize=None, callback=None)` A variant of the `map()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `imap(func, iterable, chunksize=1)` An equivalent of `itertools.imap()`. The `chunksize` argument is the same as the one used by the `map()` method. For very long iterables using a large value for `chunksize` can make make the job complete **much** faster than using the default value of `1`. Also if `chunksize` is `1` then the `next()` method of the iterator returned by the `imap()` method has an optional `timeout` parameter: `next(timeout)` will raise `processing.TimeoutError` if the result cannot be returned within `timeout` seconds. `imapUnordered(func, iterable, chunksize=1)` The same as `imap()` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) `close()` Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. `terminate()` Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected `terminate()` will be called immediately. 
`join()` Wait for the worker processes to exit. One must call `close()` or `terminate()` before using `join()`. Asynchronous result objects =========================== The result objects returns by `applyAsync()` and `mapAsync()` have the following public methods: `get(timeout=None)` Returns the result when it arrives. If `timeout` is not `None` and the result does not arrive within `timeout` seconds then `processing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by `get()`. `wait(timeout=None)` Waits until the result is available or until `timeout` seconds pass. `ready()` Returns whether the call has completed. `successful()` Returns whether the call completed without raising an exception. Will raise `AssertionError` if the result is not ready. Examples ======== The following example demonstrates the use of a pool:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, (10,)) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print it.next() # prints "0" print it.next() # prints "1" print it.next(timeout=1) # prints "4" unless your computer is *very* slow import time result = pool.applyAsync(time.sleep, (10,)) print result.get(timeout=1) # raises `TimeoutError` See also `ex_pool.py <../examples/ex_pool.py>`_. .. _Prev: proxy-objects.html .. _Up: processing-ref.html .. _Next: sharedctypes.html uqfoundation-multiprocess-b3457a5/py3.10/doc/process-objects.html000066400000000000000000000235741455552142400250250ustar00rootroot00000000000000 Process objects
Prev         Up         Next

Process objects

Process objects represent activity that is run in a separate process.

Process

The Process class has equivalents of all the methods of threading.Thread:

__init__(group=None, target=None, name=None, args=(), kwargs={})

This constructor should always be called with keyword arguments. Arguments are:

group
should be None; exists for compatibility with threading.Thread.
target
is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name
is the process name. By default, a unique name is constructed of the form 'Process-N1:N2:...:Nk' where N1,N2,...,Nk is a sequence of integers whose length is determined by the generation of the process.
args
is the argument tuple for the target invocation. Defaults to ().
kwargs
is a dictionary of keyword arguments for the target invocation. Defaults to {}.

If a subclass overrides the constructor, it must make sure it invokes the base class constructor (Process.__init__()) before doing anything else to the process.

run()

Method representing the process's activity.

You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.

start()

Start the process's activity.

This must be called at most once per process object. It arranges for the object's run() method to be invoked in a separate process.

join(timeout=None)

This blocks the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.

If timeout is None then there is no timeout.

A process can be joined many times.

A process cannot join itself because this would cause a deadlock.

It is an error to attempt to join a process before it has been started.

getName()
Return the process's name.
setName(name)

Set the process's name.

The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor.

isAlive()

Return whether the process is alive.

Roughly, a process object is alive from the moment the start() method returns until the child process terminates.

isDaemon()
Return the process's daemon flag.
setDaemon(daemonic)

Set the process's daemon flag to the Boolean value daemonic. This must be called before start() is called.

The initial value is inherited from the creating process.

When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes.

In addition process objects also support the following methods.

getPid()
Return the process ID. Before the process is spawned this will be None.
getExitCode()
Return the child's exit code. This will be None if the process has not yet terminated. A negative value -N indicates that the child was terminated by signal N.
getAuthKey()

Return the process's authentication key (a string).

When the processing package is initialized the main process is assigned a random hexadecimal string.

When a Process object is created it will inherit the authentication key of its parent process, although this may be changed using setAuthKey() below.

See Authentication Keys.

setAuthKey(authkey)
Set the process's authentication key which must be a string.
terminate()

Terminate the process. On Unix this is done using the SIGTERM signal and on Windows TerminateProcess() is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will not be terminated.

Warning

If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock.

Note that the start(), join(), isAlive() and getExitCode() methods should only be called by the process that created the process object.

Example

Example usage of some of the methods of Process:

>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
uqfoundation-multiprocess-b3457a5/py3.10/doc/process-objects.txt000066400000000000000000000136131455552142400246710ustar00rootroot00000000000000.. include:: header.txt ================= Process objects ================= Process objects represent activity that is run in a separate process. Process ======= The `Process` class has equivalents of all the methods of `threading.Thread`: `__init__(group=None, target=None, name=None, args=(), kwargs={})` This constructor should always be called with keyword arguments. Arguments are: `group` should be `None`; exists for compatibility with `threading.Thread`. `target` is the callable object to be invoked by the `run()` method. Defaults to None, meaning nothing is called. `name` is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. `args` is the argument tuple for the target invocation. Defaults to `()`. `kwargs` is a dictionary of keyword arguments for the target invocation. Defaults to `{}`. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (`Process.__init__()`) before doing anything else to the process. `run()` Method representing the process's activity. You may override this method in a subclass. The standard `run()` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the `args` and `kwargs` arguments, respectively. `start()` Start the process's activity. This must be called at most once per process object. It arranges for the object's `run()` method to be invoked in a separate process. `join(timeout=None)` This blocks the calling thread until the process whose `join()` method is called terminates or until the optional timeout occurs. If `timeout` is `None` then there is no timeout. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. `getName()` Return the process's name. `setName(name)` Set the process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. `isAlive()` Return whether the process is alive. Roughly, a process object is alive from the moment the `start()` method returns until the child process terminates. `isDaemon()` Return the process's daemon flag. `setDaemon(daemonic)` Set the process's daemon flag to the Boolean value `daemonic`. This must be called before `start()` is called. The initial value is inherited from the creating process. When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes. In addition process objects also support the following methods. `getPid()` Return the process ID. Before the process is spawned this will be `None`. `getExitCode()` Return the child's exit code. This will be `None` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. `getAuthKey()` Return the process's authentication key (a string). When the `processing` package is initialized the main process is assigned a random hexadecimal string. 
When a `Process` object is created it will inherit the authentication key of its parent process, although this may be changed using `setAuthKey()` below. See `Authentication Keys `_. `setAuthKey(authkey)` Set the process's authentication key which must be a string. `terminate()` Terminate the process. On Unix this is done using the `SIGTERM` signal and on Windows `TerminateProcess()` is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will *not* be terminates. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the `start()`, `join()`, `isAlive()` and `getExitCode()` methods should only be called by the process that created the process object. Example ======= Example usage of some of the methods of `Process`:: >>> import processing, time, signal >>> p = processing.Process(target=time.sleep, args=(1000,)) >>> print p, p.isAlive() False >>> p.start() >>> print p, p.isAlive() True >>> p.terminate() >>> print p, p.isAlive() False >>> p.getExitCode() == -signal.SIGTERM True .. _Prev: processing-ref.html .. _Up: processing-ref.html .. _Next: queue-objects.html uqfoundation-multiprocess-b3457a5/py3.10/doc/processing-ref.html000066400000000000000000000573611455552142400246470ustar00rootroot00000000000000 processing package reference
Prev         Up         Next

processing package reference

The processing package mostly replicates the API of the threading module.

Classes and exceptions

class Process(group=None, target=None, name=None, args=(), kwargs={})

An analogue of threading.Thread.

See Process objects.

exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Pipes and Queues

When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks.

For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers).

Note that one can also create a shared queue by using a manager object -- see Managers.

For an example of the usage of queues for interprocess communication see ex_workers.py.

Pipe(duplex=True)

Returns a pair (conn1, conn2) of connection objects representing the ends of a pipe.

If duplex is true then the pipe is two way; otherwise conn1 can only be used for receiving messages and conn2 can only be used for sending messages.

See Connection objects.

Queue(maxsize=0)

Returns a process shared queue object. The usual Empty and Full exceptions from the standard library's Queue module are raised to signal timeouts.

See Queue objects.
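
For illustration, a minimal interactive sketch of passing a message through a pipe and a queue (the send() and recv() calls are the connection methods described under Connection objects):

>>> from processing import Pipe, Queue
>>> a, b = Pipe()
>>> b.send([1, 'x', None])
>>> a.recv()
[1, 'x', None]
>>> q = Queue()
>>> q.put('spam')
>>> q.get()
'spam'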

Synchronization primitives

Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the standard library's threading module.

Note that one can also create synchronization primitives by using a manager object -- see Managers.

BoundedSemaphore(value=1)

Returns a bounded semaphore object: a clone of threading.BoundedSemaphore.

(On Mac OSX this is indistinguishable from Semaphore() because sem_getvalue() is not implemented on that platform).

Condition(lock=None)

Returns a condition variable: a clone of threading.Condition.

If lock is specified then it should be a Lock or RLock object from processing.

Event()
Returns an event object: a clone of threading.Event.
Lock()
Returns a non-recursive lock object: a clone of threading.Lock.
RLock()
Returns a recursive lock object: a clone of threading.RLock.
Semaphore(value=1)
Returns a semaphore object: a clone of threading.Semaphore.

Acquiring with a timeout

The acquire() method of BoundedSemaphore, Lock, RLock and Semaphore has a timeout parameter not supported by the equivalents in threading. The signature is acquire(block=True, timeout=None) with keyword parameters being acceptable. If block is true and timeout is not None then it specifies a timeout in seconds. If block is false then timeout is ignored.
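
A short interactive sketch of the timeout behaviour (the boolean return values shown are an assumption, following the threading convention of returning False when the acquire times out):

>>> from processing import Lock
>>> lock = Lock()
>>> lock.acquire()
True
>>> lock.acquire(block=True, timeout=0.5)    # already held, so this times out
False
>>> lock.release()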

Interrupting the main thread

If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to BoundedSemaphore.acquire(), Lock.acquire(), RLock.acquire(), Semaphore.acquire(), Condition.acquire() or Condition.wait() then the call will be immediately interrupted and KeyboardInterrupt will be raised.

This differs from the behaviour of threading where SIGINT will be ignored while the equivalent blocking calls are in progress.

Shared Objects

It is possible to create shared objects using shared memory which can be inherited by child processes.

Value(typecode_or_type, *args, **, lock=True)

Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Array(typecode_or_type, size_or_initializer, **, lock=True)

Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library.

See also sharedctypes.
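
For instance, a minimal sketch in which a child process modifies a shared value and a shared array created with the functions above (f is just an illustrative helper; the list comprehension mirrors the style of the example in the sharedctypes section):

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)            # shared double, lock-protected by default
    arr = Array('i', range(10))      # shared int array initialized from a sequence
    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()
    print num.value
    print [x for x in arr]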

Managers

Managers provide a way to create data which can be shared between different processes.

Manager()

Returns a started SyncManager object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies.

The methods for creating shared objects are

list(), dict(), Namespace(), Value(), Array(), Lock(), RLock(), Semaphore(), BoundedSemaphore(), Condition(), Event(), Queue().

See SyncManager.

It is possible to create managers which support other types -- see Customized managers.
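
For example, a minimal sketch of sharing a dictionary and a list between processes via a manager (f is an illustrative helper; printing a proxy shows the referent, as explained under Proxy objects):

from processing import Process, Manager

def f(d, l):
    d['count'] = 1
    l.reverse()

if __name__ == '__main__':
    manager = Manager()
    d = manager.dict()
    l = manager.list(range(10))
    p = Process(target=f, args=(d, l))
    p.start()
    p.join()
    print d
    print l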

Process Pools

One can create a pool of processes which will carry out tasks submitted to it.

Pool(processes=None, initializer=None, initargs=())

Returns a process pool object which controls a pool of worker processes to which jobs can be submitted.

It supports asynchronous results with timeouts and callbacks and has a parallel map implementation.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

See Pool objects.
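
For example, a minimal sketch using the parallel map mentioned above (square is an illustrative helper):

from processing import Pool

def square(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)
    print pool.map(square, range(10))    # parallel equivalent of map(square, range(10))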

Logging

Some support for logging is available. Note, however, that the logging package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up.

enableLogging(level, HandlerType=None, handlerArgs=(), format=None)

Enables logging and sets the debug level used by the package's logger to level. See documentation for the logging module in the standard library.

If HandlerType is specified then a handler is created using HandlerType(*handlerArgs) and this will be used by the logger -- any previous handlers will be discarded. If format is specified then this will be used for the handler; otherwise format defaults to '[%(levelname)s/%(processName)s] %(message)s'. (The logger used by processing allows use of the non-standard '%(processName)s' format.)

If HandlerType is not specified and the logger has no handlers then a default one is created which prints to sys.stderr.

Note: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call enableLogging() with the same arguments which were used when its parent process last called enableLogging() (if it ever did).

getLogger()
Returns the logger used by processing. If enableLogging() has not yet been called then None is returned.

Below is an example session with logging turned on:

>>> import processing, logging
>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] manager exiting with exitcode 0

Miscellaneous

activeChildren()

Return list of all live children of the current process.

Calling this has the side effect of "joining" any processes which have already finished.

cpuCount()
Returns the number of CPUs in the system. May raise NotImplementedError.
currentProcess()

An analogue of threading.current_thread().

Returns the object corresponding to the current process.

freezeSupport()

Adds support for when a program which uses the processing package has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example

from processing import Process, freezeSupport

def f():
    print 'hello world!'

if __name__ == '__main__':
    freezeSupport()
    Process(target=f).start()

If the freezeSupport() line is missed out then trying to run the frozen executable will raise RuntimeError.

If the module is being run normally by the python interpreter then freezeSupport() has no effect.

Note

  • The processing.dummy package replicates the API of processing but is no more than a wrapper around the threading module.
  • processing contains no analogues of activeCount, enumerate, settrace, setprofile, Timer, or local from the threading module.
uqfoundation-multiprocess-b3457a5/py3.10/doc/processing-ref.txt000066400000000000000000000310141455552142400245050ustar00rootroot00000000000000.. include:: header.txt ============================== processing package reference ============================== The `processing` package mostly replicates the API of the `threading` module. Classes and exceptions ---------------------- **class** `Process(group=None, target=None, name=None, args=(), kwargs={})` An analogue of `threading.Thread`. See `Process objects`_. **exception** `BufferTooShort` Exception raised by the `recvBytesInto()` method of a `connection object `_ when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Pipes and Queues ---------------- When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers). Note that one can also create a shared queue by using a manager object -- see `Managers`_. For an example of the usage of queues for interprocess communication see `ex_workers.py <../examples/ex_workers.py>`_. `Pipe(duplex=True)` Returns a pair `(conn1, conn2)` of connection objects representing the ends of a pipe. If `duplex` is true then the pipe is two way; otherwise `conn1` can only be used for receiving messages and `conn2` can only be used for sending messages. See `Connection objects `_. `Queue(maxsize=0)` Returns a process shared queue object. The usual `Empty` and `Full` exceptions from the standard library's `Queue` module are raised to signal timeouts. See `Queue objects `_. Synchronization primitives -------------------------- Generally synchronization primitives are not as necessary in a multiprocess program as they are in a mulithreaded program. See the documentation for the standard library's `threading` module. Note that one can also create synchronization primitves by using a manager object -- see `Managers`_. `BoundedSemaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.BoundedSemaphore`. (On Mac OSX this is indistiguishable from `Semaphore()` because `sem_getvalue()` is not implemented on that platform). `Condition(lock=None)` Returns a condition variable: a clone of `threading.Condition`. If `lock` is specified then it should be a `Lock` or `RLock` object from `processing`. `Event()` Returns an event object: a clone of `threading.Event`. `Lock()` Returns a non-recursive lock object: a clone of `threading.Lock`. `RLock()` Returns a recursive lock object: a clone of `threading.RLock`. `Semaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.Semaphore`. .. admonition:: Acquiring with a timeout The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and `Semaphore` has a timeout parameter not supported by the equivalents in `threading`. The signature is `acquire(block=True, timeout=None)` with keyword parameters being acceptable. If `block` is true and `timeout` is not `None` then it specifies a timeout in seconds. If `block` is false then `timeout` is ignored. .. 
admonition:: Interrupting the main thread If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to `BoundedSemaphore.acquire()`, `Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()`, `Condition.acquire()` or `Condition.wait()` then the call will be immediately interrupted and `KeyboardInterrupt` will be raised. This differs from the behaviour of `threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared Objects -------------- It is possible to create shared objects using shared memory which can be inherited by child processes. `Value(typecode_or_type, *args, **, lock=True)` Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Array(typecode_or_type, size_or_initializer, **, lock=True)` Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library. See also `sharedctypes `_. Managers -------- Managers provide a way to create data which can be shared between different processes. `Manager()` Returns a started `SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. The methods for creating shared objects are `list()`, `dict()`, `Namespace()`, `Value()`, `Array()`, `Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`. See `SyncManager `_. It is possible to create managers which support other types -- see `Customized managers `_. Process Pools ------------- One can create a pool of processes which will carry out tasks submitted to it. 
`Pool(processes=None, initializer=None, initargs=())` Returns a process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. See `Pool objects `_. Logging ------- Some support for logging is available. Note, however, that the `logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. `enableLogging(level, HandlerType=None, handlerArgs=(), format=None)` Enables logging and sets the debug level used by the package's logger to `level`. See documentation for the `logging` module in the standard library. If `HandlerType` is specified then a handler is created using `HandlerType(*handlerArgs)` and this will be used by the logger -- any previous handlers will be discarded. If `format` is specified then this will be used for the handler; otherwise `format` defaults to `'[%(levelname)s/%(processName)s] %(message)s'`. (The logger used by `processing` allows use of the non-standard `'%(processName)s'` format.) If `HandlerType` is not specified and the logger has no handlers then a default one is created which prints to `sys.stderr`. *Note*: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call `enableLogging()` with the same arguments which were used when its parent process last called `enableLogging()` (if it ever did). `getLogger()` Returns the logger used by `processing`. If `enableLogging()` has not yet been called then `None` is returned. Below is an example session with logging turned on:: >>> import processing, logging >>> processing.enableLogging(level=logging.INFO) >>> processing.getLogger().warning('doomed') [WARNING/MainProcess] doomed >>> m = processing.Manager() [INFO/SyncManager-1] child process calling self.run() [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-1] manager received shutdown message [INFO/SyncManager-1] manager exiting with exitcode 0 Miscellaneous ------------- `activeChildren()` Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. `cpuCount()` Returns the number of CPUs in the system. May raise `NotImplementedError`. `currentProcess()` An analogue of `threading.current_thread()`. Returns the object corresponding to the current process. `freezeSupport()` Adds support for when a program which uses the `processing` package has been frozen to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) One needs to call this function straight after the `if __name__ == '__main__'` line of the main module. For example :: from processing import Process, freezeSupport def f(): print 'hello world!' if __name__ == '__main__': freezeSupport() Process(target=f).start() If the `freezeSupport()` line is missed out then trying to run the frozen executable will raise `RuntimeError`. If the module is being run normally by the python interpreter then `freezeSupport()` has no effect. .. 
note:: * The `processing.dummy` package replicates the API of `processing` but is no more than a wrapper around the `threading` module. * `processing` contains no analogues of `activeCount`, `enumerate`, `settrace`, `setprofile`, `Timer`, or `local` from the `threading` module. Subsections ----------- + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes object `_ + `Listeners and Clients `_ .. _Prev: intro.html .. _Up: index.html .. _Next: process-objects.html uqfoundation-multiprocess-b3457a5/py3.10/doc/programming-guidelines.html000066400000000000000000000214551455552142400263640ustar00rootroot00000000000000 Programming guidelines
Prev         Up         Next

Programming guidelines

There are certain guidelines and idioms which should be adhered to when using the processing package.

All platforms

Avoid shared state

As far as possible one should try to avoid shifting large amounts of data between processes.

It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the threading module.

Picklability:
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies:

Do not use a proxy object from more than one thread unless you protect it with a lock.

(There is never a problem with different processes using the 'same' proxy.)

Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or activeChildren() is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's isAlive() will join the process. Even so it is probably good practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many types from the processing package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes

Using the terminate() method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes.

Therefore it is probably best to only consider using terminate() on processes which never use any shared resources.

Joining processes that use queues

Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the cancelJoin() method of the queue to avoid this behaviour.)

This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will automatically be joined.

An example which will deadlock is the following:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # this deadlocks
    obj = queue.get()

A fix here would be to swap the last two lines round (or simply remove the p.join() line).
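
For clarity, the corrected version of the sketch above (only the order of the last two lines changes):

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    obj = queue.get()           # drain the queue first ...
    p.join()                    # ... so the join can no longer deadlock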

Explicitly pass resources to child processes

On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process.

Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process.

So for instance

from processing import Process, Lock

def f():
    ... do something using "lock" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f).start()

should be rewritten as

from processing import Process, Lock

def f(l):
    ... do something using "l" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f, args=(lock,)).start()

Windows

Since Windows lacks os.fork() it has a few extra restrictions:

More picklability:

Ensure that all arguments to Process.__init__() are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the target argument on Windows --- just define a function and use that instead.

Also, if you subclass Process then make sure that instances will be picklable when the start() method is called.
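
A minimal sketch of a picklable Process subclass (Worker is an illustrative name; note that the base constructor is called before anything else, as required):

from processing import Process

class Worker(Process):
    def __init__(self, value):
        Process.__init__(self)       # call the base constructor first
        self.value = value           # keep instance attributes picklable
    def run(self):
        print 'working on', self.value

if __name__ == '__main__':
    w = Worker(42)
    w.start()
    w.join()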

Global variables:

Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that start() was called.

However, global variables which are just module level constants cause no problems.

Safe importing of main module:

Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).

For example, under Windows running the following module would fail with a RuntimeError:

from processing import Process

def foo():
    print 'hello'

p = Process(target=foo)
p.start()

Instead one should protect the "entry point" of the program by using if __name__ == '__main__': as follows:

from processing import Process, freezeSupport

def foo():
    print 'hello'

if __name__ == '__main__':
    freezeSupport()
    p = Process(target=foo)
    p.start()

(The freezeSupport() line can be omitted if the program will be run normally instead of frozen.)

This allows the newly spawned Python interpreter to safely import the module and then run the module's foo() function.

Similar restrictions apply if a pool or manager is created in the main module.

uqfoundation-multiprocess-b3457a5/py3.10/doc/programming-guidelines.txt000066400000000000000000000150221455552142400262300ustar00rootroot00000000000000.. include:: header.txt ======================== Programming guidelines ======================== There are certain guidelines and idioms which should be adhered to when using the `processing` package. All platforms ------------- *Avoid shared state* As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the `threading` module. *Picklability*: Ensure that the arguments to the methods of proxies are picklable. *Thread safety of proxies*: Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the 'same' proxy.) *Joining zombie processes* On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or `activeChildren()` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's `isAlive()` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. *Better to inherit than pickle/unpickle* On Windows many of types from the `processing` package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which need access to a shared resource created elsewhere can inherit it from an ancestor process. *Avoid terminating processes* Using the `terminate()` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using `terminate()` on processes which never use any shared resources. *Joining processes that use queues* Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the `cancelJoin()` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from processing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the `p.join()` line). *Explicity pass resources to child processes* On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. 
Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from processing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from processing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Windows ------- Since Windows lacks `os.fork()` it has a few extra restrictions: *More picklability*: Ensure that all arguments to `Process.__init__()` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the `target` argument on Windows --- just define a function and use that instead. Also, if you subclass `Process` then make sure that instances will be picklable when the `start()` method is called. *Global variables*: Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that `start()` was called. However, global variables which are just module level constants cause no problems. *Safe importing of main module*: Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a `RuntimeError`:: from processing import Process def foo(): print 'hello' p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using `if __name__ == '__main__':` as follows:: from processing import Process def foo(): print 'hello' if __name__ == '__main__': freezeSupport() p = Process(target=foo) p.start() (The `freezeSupport()` line can be ommitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's `foo()` function. Similar restrictions apply if a pool or manager is created in the main module. .. _Prev: connection-ref.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.10/doc/proxy-objects.html000066400000000000000000000175771455552142400245360ustar00rootroot00000000000000 Proxy objects
Prev         Up         Next

Proxy objects

A proxy is an object which refers to a shared object which lives (presumably) in a different process. The shared object is said to be the referent of the proxy. Multiple proxy objects may have the same referent.

A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can:

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<Proxy[list] object at 0x00DFA230>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
>>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
True

Notice that applying str() to a proxy will return the representation of the referent, whereas applying repr() will return the representation of the proxy.

An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:

>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b)         # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']

Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the for statement:

>>> a = manager.dict([(i*i, i) for i in range(10)])
>>> for key in a:
...     print '<%r,%r>' % (key, a[key]),
...
<0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6>

Note

Although list and dict proxy objects are iterable, it will be much more efficient to iterate over a copy of the referent, for example

for item in some_list[:]:
    ...

and

for key in some_dict.keys():
    ...

Methods of BaseProxy

Proxy objects are instances of subclasses of BaseProxy. The only semi-public methods of BaseProxy are the following:

_callMethod(methodname, args=(), kwds={})

Call and return the result of a method of the proxy's referent.

If proxy is a proxy whose referent is obj then the expression

proxy._callMethod(methodname, args, kwds)

will evaluate the expression

getattr(obj, methodname)(*args, **kwds)         (*)

in the manager's process.

The returned value will be either a copy of the result of (*) or if the result is an unpicklable iterator then a proxy for the iterator.

If an exception is raised by (*) then it is re-raised by _callMethod(). If some other exception is raised in the manager's process then this is converted into a RemoteError exception and is raised by _callMethod().

Note in particular that an exception will be raised if methodname has not been exposed --- see the exposed argument to CreatorMethod.

_getValue()

Return a copy of the referent.

If the referent is unpicklable then this will raise an exception.

__repr__
Return a representation of the proxy object.
__str__
Return the representation of the referent.

Cleanup

A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent.

A shared object gets deleted from the manager process when there are no longer any proxies referring to it.

Examples

An example of the usage of _callMethod():

>>> l = manager.list(range(10))
>>> l._callMethod('__getslice__', (2, 7))   # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._callMethod('__iter__')               # equiv to `iter(l)`
<Proxy[iter] object at 0x00DFAFF0>
>>> l._callMethod('__getitem__', (20,))     # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range

As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:

class IteratorProxy(BaseProxy):
    def __iter__(self):
        return self
    def next(self):
        return self._callMethod('next')
uqfoundation-multiprocess-b3457a5/py3.10/doc/proxy-objects.txt000066400000000000000000000115571455552142400244010ustar00rootroot00000000000000.. include:: header.txt =============== Proxy objects =============== A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that the its referent can:: >>> from processing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print l [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print repr(l) >>> l[4] 16 >>> l[2:5] [4, 9, 16] >>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] True Notice that applying `str()` to a proxy will return the representation of the referent, whereas applying `repr()` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of `a` now contains referent of `b` >>> print a, b [[]] [] >>> b.append('hello') >>> print a, b [['hello']] ['hello'] Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the `for` statement:: >>> a = manager.dict([(i*i, i) for i in range(10)]) >>> for key in a: ... print '<%r,%r>' % (key, a[key]), ... <0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6> .. note:: Although `list` and `dict` proxy objects are iterable, it will be much more efficient to iterate over a *copy* of the referent, for example :: for item in some_list[:]: ... and :: for key in some_dict.keys(): ... Methods of `BaseProxy` ====================== Proxy objects are instances of subclasses of `BaseProxy`. The only semi-public methods of `BaseProxy` are the following: `_callMethod(methodname, args=(), kwds={})` Call and return the result of a method of the proxy's referent. If `proxy` is a proxy whose referent is `obj` then the expression `proxy._callMethod(methodname, args, kwds)` will evaluate the expression `getattr(obj, methodname)(*args, **kwds)` |spaces| _`(*)` in the manager's process. The returned value will be either a copy of the result of `(*)`_ or if the result is an unpicklable iterator then a proxy for the iterator. If an exception is raised by `(*)`_ then then is re-raised by `_callMethod()`. If some other exception is raised in the manager's process then this is converted into a `RemoteError` exception and is raised by `_callMethod()`. Note in particular that an exception will be raised if `methodname` has not been *exposed* --- see the `exposed` argument to `CreatorMethod `_. `_getValue()` Return a copy of the referent. If the referent is unpicklable then this will raise an exception. `__repr__` Return a representation of the proxy object. `__str__` Return the representation of the referent. Cleanup ======= A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. 
A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Examples ======== An example of the usage of `_callMethod()`:: >>> l = manager.list(range(10)) >>> l._callMethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callMethod('__iter__') # equiv to `iter(l)` >>> l._callMethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:: class IteratorProxy(BaseProxy): def __iter__(self): return self def next(self): return self._callMethod('next') .. _Prev: manager-objects.html .. _Up: processing-ref.html .. _Next: pool-objects.html uqfoundation-multiprocess-b3457a5/py3.10/doc/queue-objects.html000066400000000000000000000227101455552142400244620ustar00rootroot00000000000000 Queue objects
Prev         Up         Next

Queue objects

The queue type provided by processing is a multi-producer, multi-consumer FIFO queue modelled on the Queue.Queue class in the standard library.

Queue(maxsize=0)

Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe.

processing.Queue implements all the methods of Queue.Queue except for qsize(), task_done() and join().

empty()
Return True if the queue is empty, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
full()
Return True if the queue is full, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
put(item, block=True, timeout=None)
Put item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).
put_nowait(item), putNoWait(item)
Equivalent to put(item, False).
get(block=True, timeout=None)
Remove and return an item from the queue. If optional args block is true and timeout is None (the default), block if necessary until an item is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the Empty exception (timeout is ignored in that case).
get_nowait(), getNoWait()
Equivalent to get(False).

processing.Queue has a few additional methods not found in Queue.Queue which are usually unnecessary:

putMany(iterable)
If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So q.putMany(X) is a faster alternative to for x in X: q.put(x). Raises an error if the queue has finite size.
close()
Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected.
joinThread()

This joins the background thread and can only be used after close() has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe.

By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call cancelJoin() to prevent this behaviour.

cancelJoin()
Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue.

Empty and Full

processing uses the usual Queue.Empty and Queue.Full exceptions to signal a timeout. They are not available in the processing namespace so you need to import them from Queue.
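
A brief interactive sketch of the timeout behaviour described above:

>>> from processing import Queue
>>> from Queue import Empty, Full
>>> q = Queue(maxsize=1)
>>> q.put('spam')
>>> q.put('eggs', block=True, timeout=0.1)
Traceback (most recent call last):
...
Full
>>> q.get(timeout=1)
'spam'
>>> q.get_nowait()
Traceback (most recent call last):
...
Empty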

Warning

If a process is killed using the terminate() method or os.kill() while it is trying to use a Queue then the data in the queue is likely to become corrupted. This may cause other processes to get an exception when they try to use the queue later on.

Warning

As mentioned above, if a child process has put items on a queue (and it has not used cancelJoin()) then that process will not terminate until all buffered items have been flushed to the pipe.

This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children.

Note that a queue created using a manager does not have this issue. See Programming Guidelines.

uqfoundation-multiprocess-b3457a5/py3.10/doc/queue-objects.txt000066400000000000000000000121211455552142400243300ustar00rootroot00000000000000.. include:: header.txt =============== Queue objects =============== The queue type provided by `processing` is a multi-producer, multi-consumer FIFO queue modelled on the `Queue.Queue` class in the standard library. `Queue(maxsize=0)` Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. `Queue.Queue` implements all the methods of `Queue.Queue` except for `qsize()`, `task_done()` and `join()`. `empty()` Return `True` if the queue is empty, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `full()` Return `True` if the queue is full, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `put(item, block=True, timeout=None)` Put item into the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Full` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the `Full` exception (`timeout` is ignored in that case). `put_nowait(item)`, `putNoWait(item)` Equivalent to `put(item, False)`. `get(block=True, timeout=None)` Remove and return an item from the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Empty` exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the `Empty` exception (`timeout` is ignored in that case). `get_nowait()`, `getNoWait()` Equivalent to `get(False)`. `processing.Queue` has a few additional methods not found in `Queue.Queue` which are usually unnecessary: `putMany(iterable)` If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So `q.putMany(X)` is a faster alternative to `for x in X: q.put(x)`. Raises an error if the queue has finite size. `close()` Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. `joinThread()` This joins the background thread and can only be used after `close()` has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call `cancelJoin()` to prevent this behaviour. `cancelJoin()` Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue. .. admonition:: `Empty` and `Full` `processing` uses the usual `Queue.Empty` and `Queue.Full` exceptions to signal a timeout. They are not available in the `processing` namespace so you need to import them from `Queue`. .. 
warning:: If a process is killed using the `terminate()` method or `os.kill()` while it is trying to use a `Queue` then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used `cancelJoin()`) then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all it non-daemonic children. Note that a queue created using a manager does not have this issue. See `Programming Guidelines `_. .. _Prev: process-objects.html .. _Up: processing-ref.html .. _Next: connection-objects.html uqfoundation-multiprocess-b3457a5/py3.10/doc/sharedctypes.html000066400000000000000000000241571455552142400244140ustar00rootroot00000000000000 Shared ctypes objects
Prev         Up         Next

Shared ctypes objects

The processing.sharedctypes module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the ctypes package.)

The functions in the module are

RawArray(typecode_or_type, size_or_initializer)

Returns a ctypes array allocated from shared memory.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock.

RawValue(typecode_or_type, *args)

Returns a ctypes object allocated from shared memory.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see documentation for ctypes.

Array(typecode_or_type, size_or_initializer, **, lock=True)

The same as RawArray() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Value(typecode_or_type, *args, **, lock=True)

The same as RawValue() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes object.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

copy(obj)
Returns a ctypes object allocated from shared memory which is a copy of the ctypes object obj.
synchronized(obj, lock=None)

Returns a process-safe wrapper object for a ctypes object which uses lock to synchronize access. If lock is None then a processing.RLock object is created automatically.

A synchronized wrapper will have two methods in addition to those of the object it wraps: getobj() returns the wrapped object and getlock() returns the lock object used for synchronization.

Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object.
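
A short interactive sketch of copy() and synchronized(), using only the functions listed above:

>>> from processing.sharedctypes import RawValue, copy, synchronized
>>> from ctypes import c_double
>>> raw = RawValue(c_double, 2.4)
>>> dup = copy(raw)                 # a second shared-memory object with the same value
>>> dup.value
2.4
>>> wrapped = synchronized(raw)     # wrapped with an automatically created RLock
>>> wrapped.getobj().value
2.4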

Equivalences

The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table MyStruct is some subclass of ctypes.Structure.)

ctypes sharedctypes using type sharedctypes using typecode
c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4)
MyStruct(4, 6) RawValue(MyStruct, 4, 6)  
(c_short * 7)() RawArray(c_short, 7) RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))

Example

Below is an example where a number of ctypes objects are modified by a child process

from processing import Process, Lock
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', 'hello world', lock=lock)
    A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print n.value
    print x.value
    print s.value
    print [(p.x, p.y) for p in A]

The results printed are

49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]

Avoid sharing pointers

Although it is possible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash.

uqfoundation-multiprocess-b3457a5/py3.10/doc/sharedctypes.txt000066400000000000000000000143071455552142400242630ustar00rootroot00000000000000.. include:: header.txt ======================== Shared ctypes objects ======================== The `processing.sharedctypes` module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the `ctypes` package.) The functions in the module are `RawArray(typecode_or_type, size_or_initializer)` Returns a ctypes array allocated from shared memory. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock. `RawValue(typecode_or_type, *args)` Returns a ctypes object allocated from shared memory. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see documentation for `ctypes`. `Array(typecode_or_type, size_or_initializer, **, lock=True)` The same as `RawArray()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Value(typecode_or_type, *args, **, lock=True)` The same as `RawValue()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `copy(obj)` Returns a ctypes object allocated from shared memory which is a copy of the ctypes object `obj`. `synchronized(obj, lock=None)` Returns a process-safe wrapper object for a ctypes object which uses `lock` to synchronize access. If `lock` is `None` then a `processing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: `getobj()` returns the wrapped object and `getlock()` returns the lock object used for synchronization. 
Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. Equivalences ============ The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table `MyStruct` is some subclass of `ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Example ======= Below is an example where a number of ctypes objects are modified by a child process :: from processing import Process, Lock from processing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for p in A: p.x **= 2 p.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(ctypes.c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print n.value print x.value print s.value print [(p.x, p.y) for p in A] The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. admonition:: Avoid sharing pointers Although it is posible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. _Prev: pool-objects.html .. _Up: processing-ref.html .. _Next: connection-ref.html uqfoundation-multiprocess-b3457a5/py3.10/doc/tests.html000066400000000000000000000060761455552142400230600ustar00rootroot00000000000000 Tests and Examples

Tests and Examples

processing contains a test sub-package which contains unit tests for the package. You can do a test run by doing

python -m processing.tests

on Python 2.5 or

python -c "from processing.tests import main; main()"

on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager.

The example sub-package contains the following modules:

ex_newtype.py
Demonstration of how to create and use customized managers and proxies.
ex_pool.py
Test of the Pool class which represents a process pool.
ex_synchronize.py
Test of synchronization types like locks, conditions and queues.
ex_workers.py
A test showing how to use queues to feed tasks to a collection of worker processes and collect the results (a minimal sketch of this pattern follows this list).
ex_webserver.py
An example of how a pool of worker processes can each run a SimpleHTTPServer.HttpServer instance while sharing a single listening socket.
benchmarks.py
Some simple benchmarks comparing processing with threading.
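
For reference, the essence of the queue-based worker pattern used by ex_workers.py can be sketched as follows (an illustrative sketch using the multiprocess fork; the helper names are not taken from the example file):

    from multiprocess import Process, Queue

    def square(x):
        return x * x

    def worker(tasks, results):
        # consume tasks until the 'STOP' sentinel arrives
        for func, args in iter(tasks.get, 'STOP'):
            results.put(func(*args))

    if __name__ == '__main__':
        tasks, results = Queue(), Queue()
        for i in range(10):
            tasks.put((square, (i,)))
        workers = [Process(target=worker, args=(tasks, results))
                   for _ in range(4)]
        for w in workers:
            w.start()
        print(sorted(results.get() for _ in range(10)))
        for w in workers:
            tasks.put('STOP')                   # one sentinel per worker
        for w in workers:
            w.join()
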
uqfoundation-multiprocess-b3457a5/py3.10/doc/tests.txt000066400000000000000000000027331455552142400227270ustar00rootroot00000000000000.. include:: header.txt Tests and Examples ================== `processing` contains a `test` sub-package which contains unit tests for the package. You can do a test run by doing :: python -m processing.tests on Python 2.5 or :: python -c "from processing.tests import main; main()" on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager. The `example` sub-package contains the following modules: `ex_newtype.py <../examples/ex_newtype.py>`_ Demonstration of how to create and use customized managers and proxies. `ex_pool.py <../examples/ex_pool.py>`_ Test of the `Pool` class which represents a process pool. `ex_synchronize.py <../examples/ex_synchronize.py>`_ Test of synchronization types like locks, conditions and queues. `ex_workers.py <../examples/ex_workers.py>`_ A test showing how to use queues to feed tasks to a collection of worker process and collect the results. `ex_webserver.py <../examples/ex_webserver.py>`_ An example of how a pool of worker processes can each run a `SimpleHTTPServer.HttpServer` instance while sharing a single listening socket. `benchmarks.py <../examples/benchmarks.py>`_ Some simple benchmarks comparing `processing` with `threading`. .. _Prev: programming-guidelines.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.10/doc/version.txt000066400000000000000000000000341455552142400232420ustar00rootroot00000000000000.. |version| replace:: 0.52 uqfoundation-multiprocess-b3457a5/py3.10/examples/000077500000000000000000000000001455552142400220705ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/examples/__init__.py000066400000000000000000000000001455552142400241670ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/examples/benchmarks.py000066400000000000000000000131321455552142400245570ustar00rootroot00000000000000# # Simple benchmarks for the processing package # import time, sys, multiprocess as processing, threading, queue as Queue, gc processing.freezeSupport = processing.freeze_support if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in range(iterations): q.put(a) # q.putMany((a for i in range(iterations)) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print(iterations, 'objects passed through the queue in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 cond.acquire() cond.notify() cond.release() for i in range(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = processing.Pipe() cond = processing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = processing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print(iterations, 'objects passed through connection in',elapsed,'seconds') print('average number/sec:', 
iterations/elapsed) #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): a = seq[5] elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): l.acquire() l.release() elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in range(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in range(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print(iterations * 2, 'waits in', elapsed, 'seconds') print('average number/sec:', iterations * 2 / elapsed) #### def test(): manager = processing.Manager() gc.disable() print('\n\t######## testing Queue.Queue\n') test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print('\n\t######## testing processing.Queue\n') test_queuespeed(processing.Process, processing.Queue(), processing.Condition()) print('\n\t######## testing Queue managed by server process\n') test_queuespeed(processing.Process, manager.Queue(), manager.Condition()) print('\n\t######## testing processing.Pipe\n') test_pipespeed() print print('\n\t######## testing list\n') test_seqspeed(range(10)) print('\n\t######## testing list managed by server process\n') test_seqspeed(manager.list(range(10))) print('\n\t######## testing Array("i", ..., lock=False)\n') test_seqspeed(processing.Array('i', range(10), lock=False)) print('\n\t######## testing Array("i", ..., lock=True)\n') test_seqspeed(processing.Array('i', range(10), lock=True)) print() print('\n\t######## testing threading.Lock\n') test_lockspeed(threading.Lock()) print('\n\t######## testing threading.RLock\n') test_lockspeed(threading.RLock()) print('\n\t######## testing processing.Lock\n') test_lockspeed(processing.Lock()) print('\n\t######## testing processing.RLock\n') test_lockspeed(processing.RLock()) print('\n\t######## testing lock managed by server process\n') test_lockspeed(manager.Lock()) print('\n\t######## testing rlock managed by server process\n') test_lockspeed(manager.RLock()) print() print('\n\t######## testing threading.Condition\n') test_conditionspeed(threading.Thread, threading.Condition()) print('\n\t######## testing processing.Condition\n') test_conditionspeed(processing.Process, processing.Condition()) print('\n\t######## testing condition managed by a server process\n') test_conditionspeed(processing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': processing.freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.10/examples/ex_newtype.py000066400000000000000000000030731455552142400246340ustar00rootroot00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. 
# from multiprocess import freeze_support as freezeSupport from multiprocess.managers import BaseManager, IteratorProxy as BaseProxy ## class Foo(object): def f(self): print('you called Foo.f()') def g(self): print('you called Foo.g()') def _h(self): print('you called Foo._h()') # A simple generator function def baz(): for i in range(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): def __iter__(self): return self def __next__(self): return self._callmethod('__next__') ## class MyManager(BaseManager): pass # register the Foo class; make all public methods accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make only `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) ## def test(): manager = MyManager() manager.start() print('-' * 20) f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') print('-' * 20) f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') print('-' * 20) it = manager.baz() for i in it: print('<%d>' % i, end=' ') print() ## if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.10/examples/ex_pool.py000066400000000000000000000155061455552142400241160ustar00rootroot00000000000000# # A test of `processing.Pool` class # from multiprocess import Pool, TimeoutError from multiprocess import cpu_count as cpuCount, current_process as currentProcess, freeze_support as freezeSupport, active_children as activeChildren import time, random, sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print('cpuCount() = %d\n' % cpuCount()) # # Create pool # PROCESSES = 4 print('Creating pool with %d processes\n' % PROCESSES) pool = Pool(PROCESSES) # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print('Ordered results using pool.apply_async():') for r in results: print('\t', r.get()) print() print('Ordered results using pool.imap():') for x in imap_it: print('\t', x) print() print('Unordered results using pool.imap_unordered():') for x in imap_unordered_it: print('\t', x) print() print('Ordered results using pool.map() --- will block till complete:') for x in pool.map(calculatestar, TASKS): print('\t', x) print() # # Simple benchmarks # N = 100000 print('def pow3(x): return x**3') t = time.time() A = list(map(pow3, range(N))) print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() B = pool.map(pow3, range(N)) print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() C = list(pool.imap(pow3, range(N), chunksize=N//8)) print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() L = [None] * 1000000 print('def noop(x): pass') print('L = [None] * 1000000') t 
= time.time() A = list(map(noop, L)) print('\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() B = pool.map(noop, L) print('\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() del A, B, C, L # # Test error handling # print('Testing error handling:') try: print(pool.apply(f, (5,))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.apply()') else: raise AssertionError('expected ZeroDivisionError') try: print(pool.map(f, range(10))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.map()') else: raise AssertionError('expected ZeroDivisionError') try: print(list(pool.imap(f, range(10)))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from list(pool.imap())') else: raise AssertionError('expected ZeroDivisionError') it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError('expected ZeroDivisionError') assert i == 9 print('\tGot ZeroDivisionError as expected from IMapIterator.next()') print() # # Testing timeouts # print('Testing ApplyResult.get() with timeout:', end='') res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except TimeoutError: sys.stdout.write('.') print() print() print('Testing IMapIterator.next() with timeout:', end='') it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except TimeoutError: sys.stdout.write('.') print() print() # # Testing callback # print('Testing callback:') A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print('\tcallbacks succeeded\n') else: print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print('Testing close():') for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print('\tclose() succeeded\n') # # Check terminate() method # print('Testing terminate():') pool = Pool(2) ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print('\tterminate() succeeded\n') # # Check garbage collection # print('Testing garbage collection:') pool = Pool(2) processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] del results, pool time.sleep(0.2) for worker in processes: assert not worker.is_alive() print('\tgarbage collection succeeded\n') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.10/examples/ex_synchronize.py000066400000000000000000000144041455552142400255140ustar00rootroot00000000000000# # A test file for the `processing` package # import time, sys, random from queue import Empty import multiprocess as processing # may 
get overwritten processing.currentProcess = processing.current_process processing.freezeSupport = processing.freeze_support processing.activeChildren = processing.active_children #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print('\n\t\t\t' + str(processing.currentProcess()) + ' has finished') running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = processing.Value('i', TASKS) mutex = processing.Lock() for i in range(TASKS): processing.Process(target=value_func, args=(running, mutex)).start() while running.value > 0: time.sleep(0.08) mutex.acquire() print(running.value, end=' ') sys.stdout.flush() mutex.release() print() print('No more running processes') #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = processing.Queue() p = processing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print(o, end=' ') sys.stdout.flush() except Empty: print('TIMEOUT') print() #### TEST_CONDITION def condition_func(cond): cond.acquire() print('\t' + str(cond)) time.sleep(2) print('\tchild is notifying') print('\t' + str(cond)) cond.notify() cond.release() def test_condition(): cond = processing.Condition() p = processing.Process(target=condition_func, args=(cond,)) print(cond) cond.acquire() print(cond) cond.acquire() print(cond) p.start() print('main is waiting') cond.wait() print('main has woken up') print(cond) cond.release() print(cond) cond.release() p.join() print(cond) #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print(running.value, 'tasks are running') mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print('%s has finished' % processing.currentProcess()) mutex.release() sema.release() def test_semaphore(): sema = processing.Semaphore(3) mutex = processing.RLock() running = processing.Value('i', 0) processes = [ processing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print('\tchild sleeping') time.sleep(5.5) print('\n\tchild terminating') def test_join_timeout(): p = processing.Process(target=join_timeout_func) p.start() print('waiting for process to finish') while 1: p.join(timeout=1) if not p.is_alive(): break print('.', end=' ') sys.stdout.flush() #### TEST_EVENT def event_func(event): print('\t%r is waiting' % processing.currentProcess()) event.wait() print('\t%r has woken up' % processing.currentProcess()) def test_event(): event = processing.Event() processes = [processing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print('main is sleeping') time.sleep(2) print('main is setting event') event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert list(a) == sa print('Tests passed') def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [processing.Value(id, v) for id, v in values] shared_arrays = 
[processing.Array(id, a) for id, a in arrays] p = processing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print('\n\t######## %s\n' % func.__name__) func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print(info) raise ValueError('there should be no positive refcounts left') if __name__ == '__main__': processing.freezeSupport() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print(' Using processes '.center(79, '-')) namespace = processing elif sys.argv[1] == 'manager': print(' Using processes and a manager '.center(79, '-')) namespace = processing.Manager() namespace.Process = processing.Process namespace.currentProcess = processing.currentProcess namespace.activeChildren = processing.activeChildren elif sys.argv[1] == 'threads': print(' Using threads '.center(79, '-')) import processing.dummy as namespace else: print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]) raise SystemExit(2) test(namespace) uqfoundation-multiprocess-b3457a5/py3.10/examples/ex_webserver.py000066400000000000000000000041001455552142400251350ustar00rootroot00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `processing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. 
# import os import sys from multiprocess import Process, current_process as currentProcess, freeze_support as freezeSupport from http.server import HTTPServer from http.server import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocess.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (currentProcess()._name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print('Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)) print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']) os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.10/examples/ex_workers.py000066400000000000000000000042241455552142400246340ustar00rootroot00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). 
# import time import random from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport from multiprocess import Queue # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks list(map(task_queue.put, TASKS1)) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print('Unordered results:') for i in range(len(TASKS1)): print('\t', done_queue.get()) # Add more tasks using `put()` instead of `putMany()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print('\t', done_queue.get()) # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.10/index.html000066400000000000000000000117511455552142400222540ustar00rootroot00000000000000 Python processing

Python processing

Author: R Oudkerk
Contact: roudkerk at users.berlios.de
Url: http://developer.berlios.de/projects/pyprocessing
Version: 0.52
Licence: BSD Licence

processing is a package for the Python language which supports the spawning of processes using the API of the standard library's threading module. It runs on both Unix and Windows.

Features:

  • Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
  • Objects can be shared between processes using a server process or (for simple data) shared memory.
  • Equivalents of all the synchronization primitives in threading are available.
  • A Pool class makes it easy to submit tasks to a pool of worker processes.

Examples

The processing.Process class follows the API of threading.Thread. For example

from processing import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=[q])
    p.start()
    print q.get()
    p.join()

Synchronization primitives like locks, semaphores and conditions are available, for example

>>> from processing import Condition
>>> c = Condition()
>>> print c
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print c
<Condition(<RLock(MainProcess, 1)>), 0>

One can also use a manager to create shared objects either in shared memory or in a server process, for example

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print l
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print repr(l)
<Proxy[list] object at 0x00E1B3B0>

Tasks can be offloaded to a pool of worker processes in various ways, for example

>>> from processing import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.mapAsync(f, range(10))
>>> print result.get(timeout=1)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
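
The multiprocess fork maintained in this repository exposes the same functionality under the modern multiprocessing-style names; an equivalent sketch (not part of the original processing documentation) is

>>> from multiprocess import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.map_async(f, range(10))
>>> print(result.get(timeout=1))
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]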
uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/000077500000000000000000000000001455552142400230035ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/__init__.py000066400000000000000000000035001455552142400251120ustar00rootroot00000000000000# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Original: Copyright (c) 2006-2008, R Oudkerk # Original: Licensed to PSF under a Contributor Agreement. # Forked by Mike McKerns, to support enhanced serialization. # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) sys.path.append(root) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(root, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) del os, sys, root, get_license_text, get_readme_as_rst import sys from . import context # # Copy stuff from default context # __all__ = [x for x in dir(context._default_context) if not x.startswith('_')] globals().update((name, getattr(context._default_context, name)) for name in __all__) # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def license(): """print license""" print (__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/connection.py000066400000000000000000000761431455552142400255270ustar00rootroot00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] import io import os import sys import socket import struct import time import tempfile import itertools try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import util from . import AuthenticationError, BufferTooShort from .context import reduction _ForkingPickler = reduction.ForkingPickler try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE except ImportError: if sys.platform == 'win32': raise _winapi = None # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... CONNECTION_TIMEOUT = 20. 
_mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return getattr(time,'monotonic',time.time)() + timeout def _check_timeout(t): return getattr(time,'monotonic',time.time)() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter)), dir="") else: raise ValueError('unrecognized family') def _validate_family(family): ''' Checks if the family is valid for the current environment. ''' if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) if sys.platform == 'win32' and family == 'AF_UNIX': # double check if not hasattr(socket, family): raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str or util.is_abstract_socket_namespace(address): return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _ConnectionBase: _handle = None def __init__(self, handle, readable=True, writable=True): handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) # HACK for byte-indexing of non-bytewise buffers (e.g. 
array.array) if m.itemsize > 1: m = memoryview(bytes(m)) n = len(m) if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(_ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. """ self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[offset // itemsize : (offset + size) // itemsize]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return _ForkingPickler.loads(buf.getbuffer()) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. 
""" _got_empty_message = False def _close(self, _CloseHandle=_winapi.CloseHandle): _CloseHandle(self._handle) def _send_bytes(self, buf): ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nwritten, err = ov.GetOverlappedResult(True) assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile(self._handle, bsize, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). """ if _winapi: def _close(self, _close=_multiprocessing.closesocket): _close(self._handle) _write = _multiprocessing.send _read = _multiprocessing.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: n = write(self._handle, buf) remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: chunk = read(handle, remaining) n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf): n = len(buf) if n > 0x7fffffff: pre_header = struct.pack("!i", -1) header = struct.pack("!Q", n) self._send(pre_header) self._send(header) self._send(buf) else: # For wire compatibility with 3.7 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. 
self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if size == -1: buf = self._recv(8) size, = struct.unpack("!Q", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. ''' listener = self._listener if listener is not None: self._listener = None listener.close() @property def address(self): return self._listener._address @property def last_accepted(self): return self._listener._last_accepted def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = Connection(s1.detach()) c2 = Connection(s2.detach()) else: fd1, fd2 = os.pipe() c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = 
overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): # Linux abstract socket namespaces do not need to be explicitly unlinked self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() s.setblocking(True) return Connection(s.detach()) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) with socket.socket( getattr(socket, family) ) as s: s.setblocking(True) s.connect(address) return Connection(s.detach()) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: res = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = b'#CHALLENGE#' WELCOME = b'#WELCOME#' FAILURE = b'#FAILURE#' def deliver_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message, 'md5').digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message, 'md5').digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signalled. (Only # returning the first signalled might create starvation issues.) 
L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res+1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: ov, err = None, e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed... if ov and sys.getwindowsversion()[:2] >= (6, 2): # ... except on Windows 8 and later, where # the message HAS been consumed. try: _, err = ov.GetOverlappedResult(False) except OSError as e: err = e.winerror if not err and hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [o for o in object_list if o in ready_objects] else: import selectors # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). if hasattr(selectors, 'PollSelector'): _WaitSelector = selectors.PollSelector else: _WaitSelector = selectors.SelectSelector def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' with _WaitSelector() as selector: for obj in object_list: selector.register(obj, selectors.EVENT_READ) if timeout is not None: deadline = getattr(time,'monotonic',time.time)() + timeout while True: ready = selector.select(timeout) if ready: return [key.fileobj for (key, events) in ready] else: if timeout is not None: timeout = deadline - getattr(time,'monotonic',time.time)() if timeout < 0: return ready # # Make connection and socket objects sharable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . 
import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(sock.detach(), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): handle = dh.detach() return PipeConnection(handle, readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): fd = df.detach() return Connection(fd, readable, writable) reduction.register(Connection, reduce_connection) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/context.py000066400000000000000000000265321455552142400250510ustar00rootroot00000000000000import os import sys import threading from . import process from . import reduction __all__ = () # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError current_process = staticmethod(process.current_process) parent_process = staticmethod(process.parent_process) active_children = staticmethod(process.active_children) def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, lock=True): '''Returns a synchronized shared object''' from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *, lock=True): '''Returns a synchronized shared array''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . 
import connection def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. ''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) from None ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property def reducer(self): '''Controls how objects will be reduced to a form that can be shared with other processes.''' return globals().get('reduction') @reducer.setter def reducer(self, reduction): globals()['reduction'] = reduction def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) @staticmethod def _after_fork(): return _default_context.get_context().Process._after_fork() class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super().get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): if sys.platform == 'win32': return ['spawn'] else: methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] if reduction.HAVE_SEND_HANDLE: methods.append('forkserver') return methods # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): if not reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not 
available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } if sys.platform == 'darwin': # bpo-33725: running arbitrary code after fork() is no longer reliable # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn else: _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/dummy/000077500000000000000000000000001455552142400241365ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/dummy/__init__.py000066400000000000000000000057651455552142400262640ustar00rootroot00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
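#
# Illustrative usage (a minimal sketch): the thread-backed API mirrors the
# process-based one, e.g.
#
#     from multiprocess.dummy import Pool
#
#     with Pool(4) as pool:
#         lengths = pool.map(len, ['a', 'bb', 'ccc'])   # -> [1, 2, 3]
#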
# __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): if self._parent is not current_process(): raise RuntimeError( "Parent is {0!r} but current_process is {1!r}".format( self._parent, current_process())) self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/dummy/connection.py000066400000000000000000000030761455552142400266550ustar00rootroot00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
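#
# Illustrative usage (a minimal sketch): the queue-backed analogues follow the
# `multiprocess.connection` API but only connect threads within one process,
# e.g.
#
#     from multiprocess.dummy.connection import Pipe
#
#     left, right = Pipe()
#     left.send('ping')
#     assert right.recv() == 'ping'
#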
# __all__ = [ 'Client', 'Listener', 'Pipe' ] from queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None @property def address(self): return self._backlog_queue def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False with self._in.not_empty: self._in.not_empty.wait(timeout) return self._in.qsize() > 0 def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/forkserver.py000066400000000000000000000275521455552142400255600ustar00rootroot00000000000000import errno import os import selectors import signal import socket import struct import sys import threading import warnings from . import connection from . import process from .context import reduction from . import resource_tracker from . import spawn from . import util __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def _stop(self): # Method used by unit tests to stop the server with self._lock: self._stop_unlocked() def _stop_unlocked(self): if self._forkserver_pid is None: return # close the "alive" file descriptor asks the server to stop os.close(self._forkserver_alive_fd) self._forkserver_alive_fd = None os.waitpid(self._forkserver_pid, 0) self._forkserver_pid = None if not util.is_abstract_socket_namespace(self._forkserver_address): os.unlink(self._forkserver_address) self._forkserver_address = None def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in self._preload_modules): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. 
''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, resource_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. ''' with self._lock: resource_tracker.ensure_running() if self._forkserver_pid is not None: # forkserver was launched before, is it still running? pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) if not pid: # still alive return # dead, launch it again os.close(self._forkserver_alive_fd) self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None cmd = ('from multiprocess.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = {x: y for x, y in data.items() if x in desired_keys} else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) if not util.is_abstract_socket_namespace(address): os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] pid = util.spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w self._forkserver_pid = pid # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass util._close_stdin() sig_r, sig_w = os.pipe() os.set_blocking(sig_r, False) os.set_blocking(sig_w, False) def sigchld_handler(*_unused): # Dummy signal handler, doesn't do anything pass handlers = { # unblocking SIGCHLD allows the wakeup fd to notify our event loop signal.SIGCHLD: sigchld_handler, # protect the process from ^C signal.SIGINT: signal.SIG_IGN, } old_handlers = {sig: signal.signal(sig, val) for (sig, val) in handlers.items()} # calling os.write() in the Python signal handler is racy signal.set_wakeup_fd(sig_w) # map child pids to client fds pid_to_fd = {} with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) selector.register(sig_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: 
# EOF because no more client processes left assert os.read(alive_r, 1) == b'', "Not at EOF?" raise SystemExit if sig_r in rfds: # Got SIGCHLD os.read(sig_r, 65536) # exhaust while True: # Scan for child processes try: pid, sts = os.waitpid(-1, os.WNOHANG) except ChildProcessError: break if pid == 0: break child_w = pid_to_fd.pop(pid, None) if child_w is not None: returncode = os.waitstatus_to_exitcode(sts) # Send exit code to client process try: write_signed(child_w, returncode) except BrokenPipeError: # client vanished pass os.close(child_w) else: # This shouldn't happen really warnings.warn('forkserver: waitpid returned ' 'unexpected pid %d' % pid) if listener in rfds: # Incoming fork request with listener.accept()[0] as s: # Receive fds from client fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) if len(fds) > MAXFDS_TO_SEND: raise RuntimeError( "Too many ({0:n}) fds to send".format( len(fds))) child_r, child_w, *fds = fds s.close() pid = os.fork() if pid == 0: # Child code = 1 try: listener.close() selector.close() unused_fds = [alive_r, child_w, sig_r, sig_w] unused_fds.extend(pid_to_fd.values()) code = _serve_one(child_r, fds, unused_fds, old_handlers) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) else: # Send pid to client process try: write_signed(child_w, pid) except BrokenPipeError: # client vanished pass pid_to_fd[pid] = child_w os.close(child_r) for fd in fds: os.close(fd) except OSError as e: if e.errno != errno.ECONNABORTED: raise def _serve_one(child_r, fds, unused_fds, handlers): # close unnecessary stuff and reset signal handlers signal.set_wakeup_fd(-1) for sig, val in handlers.items(): signal.signal(sig, val) for fd in unused_fds: os.close(fd) (_forkserver._forkserver_alive_fd, resource_tracker._resource_tracker._fd, *_forkserver._inherited_fds) = fds # Run process object received over pipe parent_sentinel = os.dup(child_r) code = spawn._main(child_r, parent_sentinel) return code # # Read and write signed numbers # def read_signed(fd): data = b'' length = SIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return SIGNED_STRUCT.unpack(data)[0] def write_signed(fd, n): msg = SIGNED_STRUCT.pack(n) while msg: nbytes = os.write(fd, msg) if nbytes == 0: raise RuntimeError('should not get here') msg = msg[nbytes:] # # # _forkserver = ForkServer() ensure_running = _forkserver.ensure_running get_inherited_fds = _forkserver.get_inherited_fds connect_to_new_process = _forkserver.connect_to_new_process set_forkserver_preload = _forkserver.set_forkserver_preload uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/heap.py000066400000000000000000000265521455552142400243040ustar00rootroot00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import bisect from collections import defaultdict import mmap import os import sys import tempfile import threading from .context import reduction, assert_spawning from . import util __all__ = ['BufferWrapper'] # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': import _winapi class Arena(object): """ A shared memory area backed by anonymous memory (Windows). 
""" _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if _winapi.GetLastError() == 0: break # We have reopened a preexisting mmap. buf.close() else: raise FileExistsError('Cannot find name for new mmap') self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state # Reopen existing mmap self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. See issue 23060 #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS else: class Arena(object): """ A shared memory area backed by a temporary file (POSIX). """ if sys.platform == 'linux': _dir_candidates = ['/dev/shm'] else: _dir_candidates = [] def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: # Arena is created anew (if fd != -1, it means we're coming # from rebuild_arena() below) self.fd, name = tempfile.mkstemp( prefix='pym-%d-'%os.getpid(), dir=self._choose_dir(size)) os.unlink(name) util.Finalize(self, os.close, (self.fd,)) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def _choose_dir(self, size): # Choose a non-storage backed directory if possible, # to improve performance for d in self._dir_candidates: st = os.statvfs(d) if st.f_bavail * st.f_frsize >= size: # enough free space? return d return util.get_temp_dir() def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because ' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): # Minimum malloc() alignment _alignment = 8 _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() # Current arena allocation size self._size = size # A sorted list of available block sizes in arenas self._lengths = [] # Free block management: # - map each block size to a list of `(Arena, start, stop)` blocks self._len_to_seq = {} # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block # starting at that offset self._start_to_block = {} # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block # ending at that offset self._stop_to_block = {} # Map arenas to their `(Arena, start, stop)` blocks in use self._allocated_blocks = defaultdict(set) self._arenas = [] # List of pending blocks to free - see comment in free() below self._pending_free_blocks = [] # Statistics self._n_mallocs = 0 self._n_frees = 0 @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _new_arena(self, size): # Create a new arena with at least the given *size* length = self._roundup(max(self._size, size), mmap.PAGESIZE) # We carve larger and larger arenas, for efficiency, until we # reach a large-ish size (roughly L3 cache-sized) if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) def 
_discard_arena(self, arena): # Possibly delete the given (unused) arena length = arena.size # Reusing an existing arena is faster than creating a new one, so # we only reclaim space if it's large enough. if length < self._DISCARD_FREE_SPACE_LARGER_THAN: return blocks = self._allocated_blocks.pop(arena) assert not blocks del self._start_to_block[(arena, 0)] del self._stop_to_block[(arena, length)] self._arenas.remove(arena) seq = self._len_to_seq[length] seq.remove((arena, 0, length)) if not seq: del self._len_to_seq[length] self._lengths.remove(length) def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): return self._new_arena(size) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _add_free_block(self, block): # make block available and try to merge with its neighbours in the arena (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _remove_allocated_block(self, block): arena, start, stop = block blocks = self._allocated_blocks[arena] blocks.remove((start, stop)) if not blocks: # Arena is entirely free, discard it from this process self._discard_arena(arena) def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held. while True: try: block = self._pending_free_blocks.pop() except IndexError: break self._add_free_block(block) self._remove_allocated_block(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
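        # The pid check below enforces that a Heap is only used from the
        # process in which it was last initialized; after a fork, malloc()
        # reinitializes the heap for the child (see malloc() further down),
        # so calling free() from a stale pid is reported as an error instead
        # of silently corrupting the heap's bookkeeping.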
if os.getpid() != self._lastpid: raise ValueError( "My pid ({0:n}) is not last pid {1:n}".format( os.getpid(),self._lastpid)) if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._n_frees += 1 self._free_pending_blocks() self._add_free_block(block) self._remove_allocated_block(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._n_mallocs += 1 # allow pending blocks to be marked available self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) real_stop = start + size if real_stop < stop: # if the returned block is larger than necessary, mark # the remainder available self._add_free_block((arena, real_stop, stop)) self._allocated_blocks[arena].add((start, real_stop)) return (arena, start, real_stop) # # Class wrapping a block allocated out of a Heap -- can be inherited by child process # class BufferWrapper(object): _heap = Heap() def __init__(self, size): if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start+size] uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/managers.py000066400000000000000000001347121455552142400251620ustar00rootroot00000000000000# # Module providing manager classes for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] # # Imports # import sys import threading import signal import array import queue import time import types import os from os import getpid from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen, ProcessError from . import pool from . import process from . import util from . import get_context try: from . 
import shared_memory except ImportError: HAS_SHMEM = False else: HAS_SHMEM = True __all__.append('SharedMemoryManager') # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] if view_types[0] is not list: # only needed in Py3.0 def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) # # Type for identifying shared objects # class Token(object): ''' Type to uniquely identify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): if not isinstance(result, str): raise TypeError( "Result {0!r} (kind '{1}') type is {2}, not str".format( result, kind, type(result))) if kind == '#UNSERIALIZABLE': return RemoteError('Unserializable message: %s\n' % result) else: return RemoteError(result) else: return ValueError('Unrecognized message type {!r}'.format(kind)) class RemoteError(Exception): def __str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): if not isinstance(authkey, bytes): raise TypeError( "Authkey {0!r} is type {1!s}, not bytes".format( authkey, type(authkey))) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if 
sys.stdout != sys.__stdout__: # what about stderr? util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def _handle_request(self, c): request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) def handle_request(self, conn): ''' Handle a new connection ''' try: self._handle_request(conn) except SystemExit: # Server.serve_client() calls sys.exit(0) on EOF pass finally: conn.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = \ self.id_to_local_proxy_obj[ident] except KeyError: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' # Perhaps include debug info about 'c'? 
with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' return len(self.id_to_refcount) def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(self, c, typeid, /, *args, **kwds): ''' Create a new shared object and return its id ''' with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: if kwds or (len(args) != 1): raise ValueError( "Without callable, must have one non-keyword argument") obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: if not isinstance(method_to_typeid, dict): raise TypeError( "Method_to_typeid {0!r}: type {1!s}, not dict".format( method_to_typeid, type(method_to_typeid))) exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: # If no external references exist but an internal (to the # manager) still does and a new external reference is created # from it, restore the manager's tracking of it from the # previously stashed internal ref. if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = \ self.id_to_local_proxy_obj[ident] obj, exposed, gettypeid = self.id_to_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if ident not in self.id_to_refcount and \ ident in self.id_to_local_proxy_obj: util.debug('Server DECREF skipping %r', ident) return with self.mutex: if self.id_to_refcount[ident] <= 0: raise AssertionError( "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( ident, self.id_to_obj[ident], self.id_to_refcount[ident])) self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: # Two-step process in case the object turns out to contain other # proxy objects (e.g. a managed list of managed lists). # Otherwise, deleting self.id_to_obj[ident] would trigger the # deleting of the stored value (another managed object) which would # in turn attempt to acquire the mutex that is already held here. 
self.id_to_obj[ident] = (None, (), None) # thread-safe util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { #XXX: register dill? 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' # bpo-36368: protect server process from KeyboardInterrupt signals signal.signal(signal.SIGINT, signal.SIG_IGN) if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', 
server.address) server.serve_forever() def _create(self, typeid, /, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() if self._state.value != State.STARTED: if self._state.value == State.INITIAL: raise ProcessError("Unable to start server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass @property def address(self): return self._address @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): # isinstance? 
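                # method_to_typeid maps a proxy method name to the typeid whose
                # proxy should wrap that method's result; for example, DictProxy
                # below uses {'__iter__': 'Iterator'} so that iterating a managed
                # dict yields an IteratorProxy rather than a copied iterator.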
assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, /, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] # Should be set to True only when a proxy object is being created # on the manager server; primary use case: nested proxy objects. # RebuildProxy detects when a proxy is being created on the manager # and sets this value appropriately. 
self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... 
decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = \ server.id_to_obj[token.id] incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, /, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref, manager_owned=manager_owned) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = 
typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): _exposed_ = ('__next__', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self, n=1): return self._callmethod('notify', (n,)) def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) __class_getitem__ = 
classmethod(types.GenericAlias) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) DictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__' )) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocess.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) # # Definition of SharedMemoryManager and SharedMemoryServer # if HAS_SHMEM: class _SharedMemoryTracker: "Manages one or more shared memory segments." def __init__(self, name, segment_names=[]): self.shared_memory_context_name = name self.segment_names = segment_names def register_segment(self, segment_name): "Adds the supplied shared memory block name to tracker." 
util.debug(f"Register segment {segment_name!r} in pid {getpid()}") self.segment_names.append(segment_name) def destroy_segment(self, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the list of blocks being tracked.""" util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") self.segment_names.remove(segment_name) segment = shared_memory.SharedMemory(segment_name) segment.close() segment.unlink() def unlink(self): "Calls destroy_segment() on all tracked shared memory blocks." for segment_name in self.segment_names[:]: self.destroy_segment(segment_name) def __del__(self): util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") self.unlink() def __getstate__(self): return (self.shared_memory_context_name, self.segment_names) def __setstate__(self, state): self.__init__(*state) class SharedMemoryServer(Server): public = Server.public + \ ['track_segment', 'release_segment', 'list_segments'] def __init__(self, *args, **kwargs): Server.__init__(self, *args, **kwargs) address = self.address # The address of Linux abstract namespaces can be bytes if isinstance(address, bytes): address = os.fsdecode(address) self.shared_memory_context = \ _SharedMemoryTracker(f"shm_{address}_{getpid()}") util.debug(f"SharedMemoryServer started by pid {getpid()}") def create(self, c, typeid, /, *args, **kwargs): """Create a new distributed-shared object (not backed by a shared memory block) and return its id to be used in a Proxy Object.""" # Unless set up as a shared proxy, don't make shared_memory_context # a standard part of kwargs. This makes things easier for supplying # simple functions. if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): kwargs['shared_memory_context'] = self.shared_memory_context return Server.create(self, c, typeid, *args, **kwargs) def shutdown(self, c): "Call unlink() on all tracked shared memory, terminate the Server." self.shared_memory_context.unlink() return Server.shutdown(self, c) def track_segment(self, c, segment_name): "Adds the supplied shared memory block name to Server's tracker." self.shared_memory_context.register_segment(segment_name) def release_segment(self, c, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the tracker instance inside the Server.""" self.shared_memory_context.destroy_segment(segment_name) def list_segments(self, c): """Returns a list of names of shared memory blocks that the Server is currently tracking.""" return self.shared_memory_context.segment_names class SharedMemoryManager(BaseManager): """Like SyncManager but uses SharedMemoryServer instead of Server. It provides methods for creating and returning SharedMemory instances and for creating a list-like object (ShareableList) backed by shared memory. It also provides methods that create and return Proxy Objects that support synchronization across processes (i.e. multi-process-safe locks and semaphores). """ _Server = SharedMemoryServer def __init__(self, *args, **kwargs): if os.name == "posix": # bpo-36867: Ensure the resource_tracker is running before # launching the manager process, so that concurrent # shared_memory manipulation both in the manager and in the # current process does not create two resource_tracker # processes. from . 
import resource_tracker resource_tracker.ensure_running() BaseManager.__init__(self, *args, **kwargs) util.debug(f"{self.__class__.__name__} created by pid {getpid()}") def __del__(self): util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") pass def get_server(self): 'Better than monkeypatching for now; merge into Server ultimately' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started SharedMemoryServer") elif self._state.value == State.SHUTDOWN: raise ProcessError("SharedMemoryManager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self._Server(self._registry, self._address, self._authkey, self._serializer) def SharedMemory(self, size): """Returns a new SharedMemory instance with the specified size in bytes, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sms = shared_memory.SharedMemory(None, create=True, size=size) try: dispatch(conn, None, 'track_segment', (sms.name,)) except BaseException as e: sms.unlink() raise e return sms def ShareableList(self, sequence): """Returns a new ShareableList instance populated with the values from the input sequence, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sl = shared_memory.ShareableList(sequence) try: dispatch(conn, None, 'track_segment', (sl.shm.name,)) except BaseException as e: sl.shm.unlink() raise e return sl uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/pool.py000066400000000000000000000777671455552142400243560ustar00rootroot00000000000000# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import collections import itertools import os import queue import threading import time import traceback import types import warnings # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError from .connection import wait # # Constants representing the state of a pool # INIT = "INIT" RUN = "RUN" CLOSE = "CLOSE" TERMINATE = "TERMINATE" # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): if (maxtasks is not None) and not (isinstance(maxtasks, int) and maxtasks >= 1): raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception and func is not _helper_reraises_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 util.debug('worker exiting after %d tasks' % completed) def _helper_reraises_exception(ex): 'Pickle-able helper function for use by _guarded_task_generation.' raise ex # # Class representing a process pool # class _PoolCache(dict): """ Class that implements a cache for the Pool class that will notify the pool management threads every time the cache is emptied. The notification is done by the use of a queue that is provided when instantiating the cache. """ def __init__(self, /, *args, notifier=None, **kwds): self.notifier = notifier super().__init__(*args, **kwds) def __delitem__(self, item): super().__delitem__(item) # Notify that the cache is empty. This is important because the # pool keeps maintaining workers until the cache gets drained. This # eliminates a race condition in which a task is finished after the # the pool's _handle_workers method has enter another iteration of the # loop. In this situation, the only event that can wake up the pool # is the cache to be emptied (no more tasks available). if not self: self.notifier.put(None) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' _wrap_exception = True @staticmethod def Process(ctx, *args, **kwds): return ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): # Attributes initialized early to make sure that they exist in # __del__() if __init__() raises an exception self._pool = [] self._state = INIT self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.SimpleQueue() # The _change_notifier queue exist to wake up self._handle_workers() # when the cache (self._cache) is empty or when there is a change in # the _state variable of the thread that runs _handle_workers. 
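# Editorial sketch (not part of pool.py) of how the RemoteTraceback /
# ExceptionWithTraceback machinery defined earlier in this module behaves:
# an exception raised in a worker is re-raised in the parent by
# AsyncResult.get(), with the worker's formatted traceback attached as
# __cause__. The failing function below is illustrative.
import multiprocess as mp

def _fail(x):
    raise ValueError('bad input: %r' % (x,))

if __name__ == '__main__':
    with mp.Pool(2) as pool:
        res = pool.apply_async(_fail, (42,))
        try:
            res.get(timeout=10)
        except ValueError as exc:
            # exc.__cause__ is a RemoteTraceback carrying the remote traceback text
            print(type(exc.__cause__).__name__, exc)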
self._change_notifier = self._ctx.SimpleQueue() self._cache = _PoolCache(notifier=self._change_notifier) self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if maxtasksperchild is not None: if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: raise ValueError("maxtasksperchild must be a positive int or None") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes try: self._repopulate_pool() except Exception: for p in self._pool: if p.exitcode is None: p.terminate() for p in self._pool: p.join() raise sentinels = self._get_sentinels() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self._cache, self._taskqueue, self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception, sentinels, self._change_notifier) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._change_notifier, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) self._state = RUN # Copy globals as function locals to make sure that they are available # during Python shutdown when the Pool is destroyed. def __del__(self, _warn=warnings.warn, RUN=RUN): if self._state == RUN: _warn(f"unclosed running multiprocessing pool {self!r}", ResourceWarning, source=self) if getattr(self, '_change_notifier', None) is not None: self._change_notifier.put(None) def __repr__(self): cls = self.__class__ return (f'<{cls.__module__}.{cls.__qualname__} ' f'state={self._state} ' f'pool_size={len(self._pool)}>') def _get_sentinels(self): task_queue_sentinels = [self._outqueue._reader] self_notifier_sentinels = [self._change_notifier._reader] return [*task_queue_sentinels, *self_notifier_sentinels] @staticmethod def _get_worker_sentinels(workers): return [worker.sentinel for worker in workers if hasattr(worker, "sentinel")] @staticmethod def _join_exited_workers(pool): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. 
""" cleaned = False for i in reversed(range(len(pool))): worker = pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del pool[i] return cleaned def _repopulate_pool(self): return self._repopulate_pool_static(self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) @staticmethod def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(processes - len(pool)): w = Process(ctx, target=worker, args=(inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() pool.append(w) util.debug('added worker') @staticmethod def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Clean up any exited workers and start replacements for them. """ if Pool._join_exited_workers(pool): Pool._repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _check_running(self): if self._state != RUN: raise ValueError("Pool not running") def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. Pool must be running. ''' return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def _guarded_task_generation(self, result_job, func, iterable): '''Provides a generator of tasks for imap and imap_unordered with appropriate handling for iterables which throw exceptions during iteration.''' try: i = -1 for i, x in enumerate(iterable): yield (result_job, i, func, (x,), {}) except Exception as e: yield (result_job, i+1, _helper_reraises_exception, (e,), {}) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. 
''' self._check_running() if chunksize == 1: result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0:n}".format( chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. ''' self._check_running() if chunksize == 1: result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0!r}".format(chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' self._check_running() result = ApplyResult(self, callback, error_callback) self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' self._check_running() if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapper, task_batches), None ) ) return result @staticmethod def _wait_for_updates(sentinels, change_notifier, timeout=None): wait(sentinels, timeout=timeout) while not change_notifier.empty(): change_notifier.get() @classmethod def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception, sentinels, change_notifier): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
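# Editorial sketch (not part of pool.py) of the default chunksize rule used by
# _map_async() above: the iterable is split into roughly four chunks per worker
# process, rounding up. The helper name below is illustrative.
def _default_chunksize(n_items, n_workers):
    chunksize, extra = divmod(n_items, n_workers * 4)
    if extra:
        chunksize += 1
    return chunksize if n_items else 0

# e.g. 100 items on a 4-process pool -> ceil(100 / 16) == 7 items per task
assert _default_chunksize(100, 4) == 7
assert _default_chunksize(0, 4) == 0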
while thread._state == RUN or (cache and thread._state != TERMINATE): cls._maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] cls._wait_for_updates(current_sentinels, change_notifier) # send sentinel to stop workers taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None try: # iterating taskseq cannot fail for task in taskseq: if thread._state != RUN: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, idx = task[:2] try: cache[job]._set(idx, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') idx = task[1] if task else -1 set_length(idx + 1) continue break finally: task = taskseq = job = None else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state != RUN: assert thread._state == TERMINATE, "Thread not in TERMINATE" util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE self._change_notifier.put(None) def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') if self._state == RUN: raise ValueError("Pool is still running") elif self._state not in (CLOSE, TERMINATE): raise ValueError("In unknown state") self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') # Notify that the worker_handler state has been changed so the # _handle_workers loop can be unblocked (and exited) in order to # send the finalization sentinel all the workers. worker_handler._state = TERMINATE change_notifier.put(None) task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) if (not result_handler.is_alive()) and (len(cache) != 0): raise AssertionError( "Cannot have cache with result_hander not alive") result_handler._state = TERMINATE change_notifier.put(None) outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
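# Editorial sketch (not part of pool.py) of the two shutdown paths implied by
# the state machine above: close() followed by join() lets queued work finish,
# while the context-manager __exit__ (or an explicit terminate()) stops the
# workers immediately. Function and variable names are illustrative.
import multiprocess as mp

def _square(x):
    return x * x

if __name__ == '__main__':
    pool = mp.Pool(2)
    result = pool.map_async(_square, range(8))
    pool.close()                 # state -> CLOSE: no further tasks accepted
    pool.join()                  # waits for workers once outstanding work drains
    print(result.get())

    with mp.Pool(2) as pool:     # __exit__ calls terminate() (state -> TERMINATE)
        print(pool.map(_square, range(8)))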
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): self._check_running() return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, pool, callback, error_callback): self._pool = pool self._event = threading.Event() self._job = next(job_counter) self._cache = pool._cache self._callback = callback self._error_callback = error_callback self._cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): if not self.ready(): raise ValueError("{0!r} not ready".format(self)) return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] self._pool = None __class_getitem__ = classmethod(types.GenericAlias) AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, pool, chunksize, length, callback, error_callback): ApplyResult.__init__(self, pool, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del self._cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): self._number_left -= 1 success, result = success_result if success and self._success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() self._pool = None else: if not success and self._success: # only store first exception self._success = False self._value = result if self._number_left == 0: # only consider the result ready once all jobs are done if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() self._pool = None # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, pool): self._pool = pool self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = pool._cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} self._cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise 
StopIteration from None raise TimeoutError from None success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] self._pool = None def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] self._pool = None # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] self._pool = None # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(ctx, *args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.SimpleQueue() self._outqueue = queue.SimpleQueue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _get_sentinels(self): return [self._change_notifier._reader] @staticmethod def _get_worker_sentinels(workers): return [] @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # drain inqueue, and put sentinels at its head to make workers finish try: while True: inqueue.get(block=False) except queue.Empty: pass for i in range(size): inqueue.put(None) def _wait_for_updates(self, sentinels, change_notifier, timeout): time.sleep(timeout) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/popen_fork.py000066400000000000000000000045061455552142400255240ustar00rootroot00000000000000import os import signal from . import util __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' def __init__(self, process_obj): util._flush_std_streams() self.returncode = None self.finalizer = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except OSError: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: self.returncode = os.waitstatus_to_exitcode(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from multiprocess.connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
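# Editorial sketch (not part of the source) of the ThreadPool defined at the
# end of pool.py above: it exposes the same API as Pool, but the workers are
# threads, so it suits I/O-bound callables and arguments need not be pickled.
from multiprocess.pool import ThreadPool

if __name__ == '__main__':
    with ThreadPool(4) as tp:
        # a lambda is fine here because nothing crosses a process boundary
        print(tp.map(lambda n: n * n, range(10)))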
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def _send_signal(self, sig): if self.returncode is None: try: os.kill(self.pid, sig) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def terminate(self): self._send_signal(signal.SIGTERM) def kill(self): self._send_signal(signal.SIGKILL) def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) os.close(parent_w) code = process_obj._bootstrap(parent_sentinel=child_r) finally: os._exit(code) else: os.close(child_w) os.close(child_r) self.finalizer = util.Finalize(self, util.close_fds, (parent_r, parent_w,)) self.sentinel = parent_r def close(self): if self.finalizer is not None: self.finalizer() uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/popen_forkserver.py000066400000000000000000000042631455552142400267530ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen if not reduction.HAVE_SEND_HANDLE: raise ImportError('No support for sending fds between processes') from . import forkserver from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) # Keep a duplicate of the data pipe's write end as a sentinel of the # parent process used by the child process. _parent_w = os.dup(w) self.finalizer = util.Finalize(self, util.close_fds, (_parent_w, self.sentinel)) with open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_signed(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from multiprocess.connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_signed(self.sentinel) except (OSError, EOFError): # This should not happen usually, but perhaps the forkserver # process itself got killed self.returncode = 255 return self.returncode uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/popen_spawn_posix.py000066400000000000000000000037551455552142400271420ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): from . 
import resource_tracker tracker_fd = resource_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds) self.sentinel = parent_r with open(parent_w, 'wb', closefd=False) as f: f.write(fp.getbuffer()) finally: fds_to_close = [] for fd in (parent_r, parent_w): if fd is not None: fds_to_close.append(fd) self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) for fd in (child_r, child_w): if fd is not None: os.close(fd) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/popen_spawn_win32.py000066400000000000000000000076531455552142400267430ustar00rootroot00000000000000import os import msvcrt import signal import sys import _winapi from .context import reduction, get_spawning_popen, set_spawning_popen from . import spawn from . import util __all__ = ['Popen'] # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") def _path_eq(p1, p2): return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) WINENV = not _path_eq(sys.executable, sys._base_executable) def _close_handles(*handles): for handle in handles: _winapi.CloseHandle(handle) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' def __init__(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be duplicated by the child process # -- see spawn_main() in spawn.py. # # bpo-33929: Previously, the read end of pipe was "stolen" by the child # process, but it leaked a handle if the child process had been # terminated before it could steal the handle from the parent process. rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) cmd = ' '.join('"%s"' % x for x in cmd) python_exe = spawn.get_executable() # bpo-35797: When running in a venv, we bypass the redirect # executor and launch our base Python. 
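# Editorial sketch (not part of the source): each popen_*.py module above backs
# one start method ('fork', 'forkserver', 'spawn'). User code selects between
# them through a context; availability depends on the platform, and the names
# below are illustrative.
import multiprocess as mp

def _hello(tag):
    print('hello from', tag, 'pid', mp.current_process().pid)

if __name__ == '__main__':
    print(mp.get_all_start_methods())   # e.g. ['fork', 'spawn', 'forkserver'] on Linux
    ctx = mp.get_context('spawn')       # spawn -> popen_spawn_posix / popen_spawn_win32
    p = ctx.Process(target=_hello, args=('spawn-child',))
    p.start()
    p.join()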
if WINENV and _path_eq(python_exe, sys.executable): python_exe = sys._base_executable env = os.environ.copy() env["__PYVENV_LAUNCHER__"] = sys.executable else: env = None with open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = _winapi.CreateProcess( python_exe, cmd, None, None, False, 0, env, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) self.finalizer = util.Finalize(self, _close_handles, (self.sentinel, int(rhandle))) # send information to child set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: set_spawning_popen(None) def duplicate_for_child(self, handle): assert self is get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except OSError: if self.wait(timeout=1.0) is None: raise kill = terminate def close(self): self.finalizer() uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/process.py000066400000000000000000000274631455552142400250470ustar00rootroot00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
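# Editorial usage sketch (not part of process.py) of the Process API defined
# below: either pass a target callable, or subclass and override run(). The
# Counter class is illustrative.
import multiprocess as mp

class Counter(mp.Process):
    def __init__(self, n):
        super().__init__(name='counter')
        self.n = n

    def run(self):                       # executed in the child process
        print(self.name, 'pid', self.pid, 'sum', sum(range(self.n)))

if __name__ == '__main__':
    p = Counter(10)
    p.start()
    p.join()
    print('exitcode', p.exitcode)        # 0 on a clean exit
    p.close()                            # release the underlying Popen resources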
# __all__ = ['BaseProcess', 'current_process', 'active_children', 'parent_process'] # # Imports # import os import sys import signal import itertools import threading from _weakrefset import WeakSet # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_children) def parent_process(): ''' Return process object representing the parent process ''' return _parent_process # # # def _cleanup(): # check for processes which have finished for p in list(_children): if p._popen.poll() is not None: _children.discard(p) # # The `Process` class # class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analogous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count,) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._parent_name = _current_process.name self._popen = None self._closed = False self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) if daemon is not None: self.daemon = daemon _dangling.add(self) def _check_closed(self): if self._closed: raise ValueError("process object is closed") def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' self._check_closed() assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._check_closed() self._popen.terminate() def kill(self): ''' Terminate process; sends SIGKILL signal or uses TerminateProcess() ''' self._check_closed() self._popen.kill() def join(self, timeout=None): ''' Wait until child process terminates ''' self._check_closed() assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) def is_alive(self): ''' Return whether process is alive ''' self._check_closed() if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _children.discard(self) return False def close(self): ''' Close the Process object. This method releases resources held by the Process object. 
It is an error to call this method if the child process is still running. ''' if self._popen is not None: if self._popen.poll() is None: raise ValueError("Cannot close a process while it is still running. " "You should first call join() or terminate().") self._popen.close() self._popen = None del self._sentinel _children.discard(self) self._closed = True @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, str), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' self._check_closed() if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' self._check_closed() if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None def __repr__(self): exitcode = None if self is _current_process: status = 'started' elif self._closed: status = 'closed' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: exitcode = self._popen.poll() if exitcode is not None: status = 'stopped' else: status = 'started' info = [type(self).__name__, 'name=%r' % self._name] if self._popen is not None: info.append('pid=%s' % self._popen.pid) info.append('parent=%s' % self._parent_pid) info.append(status) if exitcode is not None: exitcode = _exitcode_to_name.get(exitcode, exitcode) info.append('exitcode=%s' % exitcode) if self.daemon: info.append('daemon') return '<%s>' % ' '.join(info) ## def _bootstrap(self, parent_sentinel=None): from . 
import util, context global _current_process, _parent_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() util._close_stdin() old_process = _current_process _current_process = self _parent_process = _ParentProcess( self._parent_name, self._parent_pid, parent_sentinel) if threading._HAVE_THREAD_NATIVE_ID: threading.main_thread()._set_native_id() try: self._after_fork() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as e: if e.code is None: exitcode = 0 elif isinstance(e.code, int): exitcode = e.code else: sys.stderr.write(str(e.code) + '\n') exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: threading._shutdown() util.info('process exiting with exitcode %d' % exitcode) util._flush_std_streams() return exitcode @staticmethod def _after_fork(): from . import util util._finalizer_registry.clear() util._run_after_forkers() # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the parent process # class _ParentProcess(BaseProcess): def __init__(self, name, pid, sentinel): self._identity = () self._name = name self._pid = pid self._parent_pid = None self._popen = None self._closed = False self._sentinel = sentinel self._config = {} def is_alive(self): from multiprocess.connection import wait return not wait([self._sentinel], timeout=0) @property def ident(self): return self._pid def join(self, timeout=None): ''' Wait until parent process terminates ''' from multiprocess.connection import wait wait([self._sentinel], timeout=timeout) pid = ident # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._closed = False self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} # Note that some versions of FreeBSD only allow named # semaphores to have names of up to 14 characters. Therefore # we choose a short prefix. # # On MacOSX in a sandbox it may be necessary to use a # different prefix -- see #19478. # # Everything in self._config will be inherited by descendant # processes. def close(self): pass _parent_process = None _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in list(signal.__dict__.items()): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = f'-{name}' # For debug and leak testing _dangling = WeakSet() uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/queues.py000066400000000000000000000275531455552142400247000ustar00rootroot00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
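# Editorial usage sketch (not part of queues.py) of the pipe-plus-feeder-thread
# Queue implemented below; the producer/consumer functions are illustrative.
import multiprocess as mp

def _producer(q):
    for i in range(5):
        q.put(i)
    q.put(None)                          # sentinel: tells the consumer to stop

def _consumer(q):
    while (item := q.get()) is not None:
        print('got', item)

if __name__ == '__main__':
    q = mp.Queue()
    consumer = mp.Process(target=_consumer, args=(q,))
    producer = mp.Process(target=_producer, args=(q,))
    consumer.start()
    producer.start()
    producer.join()
    consumer.join()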
# __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import types import weakref import errno from queue import Empty, Full try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import connection from . import context _ForkingPickler = context.reduction.ForkingPickler from .util import debug, info, Finalize, register_after_fork, is_exiting # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0, *, ctx): if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._reset() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._reset() def _after_fork(self): debug('Queue._after_fork()') self._reset(after_fork=True) def _reset(self, after_fork=False): if after_fork: self._notempty._at_fork_reinit() else: self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = getattr(time,'monotonic',time.time)() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - getattr(time,'monotonic',time.time)() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed, "Queue {0!r} not closed".format(self) if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except 
AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._reader.close, self._writer.close, self._ignore_epipe, self._on_queue_feeder_error, self._sem), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, reader_close, writer_close, ignore_epipe, onerror, queue_sem): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') reader_close() writer_close() return # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as e: if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: # Since the object has not been sent in the queue, we need # to decrease the size of the queue. The error acts as # if the object had been silently removed from the queue # and this step is necessary to have a properly working # queue. queue_sem.release() onerror(e, obj) @staticmethod def _on_queue_feeder_error(e, obj): """ Private API hook called when feeding data in the background thread raises an exception. For overriding by concurrent.futures. """ import traceback traceback.print_exc() _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. 
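# Editorial sketch (not part of queues.py) of the task_done()/join() protocol
# described in the note above: every get() must be matched by a task_done(),
# otherwise join() blocks forever. The worker function is illustrative.
import multiprocess as mp

def _worker(q):
    while True:
        item = q.get()
        try:
            if item is None:
                break
            print('processed', item)
        finally:
            q.task_done()                # exactly one task_done() per get()

if __name__ == '__main__':
    q = mp.JoinableQueue()
    w = mp.Process(target=_worker, args=(q,), daemon=True)
    w.start()
    for i in range(3):
        q.put(i)
    q.join()                             # returns once every item is task_done()
    q.put(None)                          # then ask the worker to exit
    w.join()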
# class JoinableQueue(Queue): def __init__(self, maxsize=0, *, ctx): Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty, self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self, *, ctx): self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() def close(self): self._reader.close() self._writer.close() def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._poll = self._reader.poll def get(self): with self._rlock: res = self._reader.recv_bytes() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def put(self, obj): # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(obj) else: with self._wlock: self._writer.send_bytes(obj) __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/reduction.py000066400000000000000000000226451455552142400253620ustar00rootroot00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from abc import ABCMeta import copyreg import functools import io import os try: import dill as pickle except ImportError: import pickle import socket import sys from . 
import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocess.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None, *args, **kwds): buf = io.BytesIO() cls(buf, protocol, *args, **kwds).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None, *args, **kwds): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol, *args, **kwds).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False, *, source_process=None): '''Duplicate a handle. (target_process is a handle not a pid!)''' current_process = _winapi.GetCurrentProcess() if source_process is None: source_process = current_process if target_process is None: target_process = current_process return _winapi.DuplicateHandle( source_process, handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
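# Editorial sketch (not part of reduction.py) of the ForkingPickler defined
# above. Because this module prefers dill over the stdlib pickle when dill is
# installed (see the try/except import near the top of the file), closures and
# lambdas can round-trip; with plain pickle this example would raise instead.
from multiprocess.reduction import ForkingPickler

def make_adder(k):
    return lambda x: x + k               # a closure the stdlib pickler rejects

payload = ForkingPickler.dumps(make_adder(10))   # returns a bytes-like buffer
adder = ForkingPickler.loads(bytes(payload))
assert adder(5) == 15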
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) if len(a) % 256 != msg[0]: raise AssertionError( "Len is {0:n} but msg[0] is {1!r}".format( len(a), msg[0])) return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) class AbstractReducer(metaclass=ABCMeta): '''Abstract base class for use in implementing a Reduction class suitable for use in replacing the standard reduction mechanism used in multiprocess.''' ForkingPickler = ForkingPickler register = register dump = dump send_handle = send_handle recv_handle = recv_handle if sys.platform == 'win32': steal_handle = steal_handle duplicate = duplicate DupHandle = DupHandle else: sendfds = sendfds recvfds = recvfds DupFd = DupFd _reduce_method = _reduce_method _reduce_method_descriptor = _reduce_method_descriptor _rebuild_partial = _rebuild_partial _reduce_socket = _reduce_socket _rebuild_socket = _rebuild_socket def __init__(self, *args): register(type(_C().f), _reduce_method) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) register(functools.partial, _reduce_partial) register(socket.socket, _reduce_socket) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/resource_sharer.py000066400000000000000000000120141455552142400265460ustar00rootroot00000000000000# # We use a background thread for sharing fds on Unix, and for sharing sockets on # Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. # import os import signal import socket import sys import threading from . import process from .context import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. 
This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resources using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() self._lock._at_fork_reinit() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None, "Already have Listener" util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/resource_tracker.py000066400000000000000000000215401455552142400267210ustar00rootroot00000000000000############################################################################### # Server process to keep track of unlinked resources (like shared memory # segments, semaphores etc.) and clean them. # # On Unix we run a server process which keeps track of unlinked # resources. The server ignores SIGINT and SIGTERM and reads from a # pipe. 
Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining resource names. # # This is important because there may be system limits for such resources: for # instance, the system only supports a limited number of named semaphores, and # shared-memory segments live in the RAM. If a python process leaks such a # resource, this resource will not be removed till the next reboot. Without # this resource tracker process, "killall python" would probably leave unlinked # resources. import os import signal import sys import threading import warnings from . import spawn from . import util __all__ = ['ensure_running', 'register', 'unregister'] _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) _CLEANUP_FUNCS = { 'noop': lambda: None, } if os.name == 'posix': try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import _posixshmem # Use sem_unlink() to clean up named semaphores. # # sem_unlink() may be missing if the Python build process detected the # absence of POSIX named semaphores. In that case, no named semaphores were # ever opened, so no cleanup would be necessary. if hasattr(_multiprocessing, 'sem_unlink'): _CLEANUP_FUNCS.update({ 'semaphore': _multiprocessing.sem_unlink, }) _CLEANUP_FUNCS.update({ 'shared_memory': _posixshmem.shm_unlink, }) class ResourceTracker(object): def __init__(self): self._lock = threading.Lock() self._fd = None self._pid = None def _stop(self): with self._lock: if self._fd is None: # not running return # closing the "alive" file descriptor stops main() os.close(self._fd) self._fd = None os.waitpid(self._pid, 0) self._pid = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that resource tracker process is running. This can be run from any process. Usually a child process will use the resource created by its parent.''' with self._lock: if self._fd is not None: # resource tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) # Clean-up to avoid dangling processes. try: # _pid can be None if this process is a child from another # python process, which has started the resource_tracker. if self._pid is not None: os.waitpid(self._pid, 0) except ChildProcessError: # The resource_tracker has already been terminated. pass self._fd = None self._pid = None warnings.warn('resource_tracker: process died unexpectedly, ' 'relaunching. Some resources might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from multiprocess.resource_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] # bpo-33613: Register a signal mask that will block the signals. # This signal mask will be inherited by the child that is going # to be spawned and will protect the child from a race condition # that can make the child die before it registers signal handlers # for SIGINT and SIGTERM. The mask is unregistered after spawning # the child. 
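# --- Illustrative note (added; not part of the upstream module) ---
# The tracker is normally driven indirectly (SharedMemory blocks and named
# semaphores register themselves), but the module-level helpers exported above
# can also be exercised directly on a POSIX platform; a minimal sketch, with a
# hypothetical resource name:
#
#     from multiprocess import resource_tracker
#
#     resource_tracker.ensure_running()          # spawn the tracker if needed
#     resource_tracker.register('/psm_example', 'shared_memory')
#     # ... use the resource ...
#     resource_tracker.unregister('/psm_example', 'shared_memory')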
try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = util.spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r) def _check_alive(self): '''Check that the pipe has not been closed by sending a probe.''' try: # We cannot use send here as it calls ensure_running, creating # a cycle. os.write(self._fd, b'PROBE:0:noop\n') except OSError: return False else: return True def register(self, name, rtype): '''Register name of resource with resource tracker.''' self._send('REGISTER', name, rtype) def unregister(self, name, rtype): '''Unregister name of resource with resource tracker.''' self._send('UNREGISTER', name, rtype) def _send(self, cmd, name, rtype): self.ensure_running() msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') if len(msg) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('msg too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( nbytes, len(msg)) _resource_tracker = ResourceTracker() ensure_running = _resource_tracker.ensure_running register = _resource_tracker.register unregister = _resource_tracker.unregister getfd = _resource_tracker.getfd def main(fd): '''Run resource tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} try: # keep track of registered/unregistered resources with open(fd, 'rb') as f: for line in f: try: cmd, name, rtype = line.strip().decode('ascii').split(':') cleanup_func = _CLEANUP_FUNCS.get(rtype, None) if cleanup_func is None: raise ValueError( f'Cannot register {name} for automatic cleanup: ' f'unknown resource type {rtype}') if cmd == 'REGISTER': cache[rtype].add(name) elif cmd == 'UNREGISTER': cache[rtype].remove(name) elif cmd == 'PROBE': pass else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining resources for rtype, rtype_cache in cache.items(): if rtype_cache: try: warnings.warn('resource_tracker: There appear to be %d ' 'leaked %s objects to clean up at shutdown' % (len(rtype_cache), rtype)) except Exception: pass for name in rtype_cache: # For some reason the process which created and registered this # resource has failed to unregister it. Presumably it has # died. We therefore unlink it. try: try: _CLEANUP_FUNCS[rtype](name) except Exception as e: warnings.warn('resource_tracker: %r: %s' % (name, e)) finally: pass uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/shared_memory.py000066400000000000000000000440321455552142400262160ustar00rootroot00000000000000"""Provides shared memory for direct access across processes. The API of this package is currently provisional. Refer to the documentation for details. """ __all__ = [ 'SharedMemory', 'ShareableList' ] from functools import partial import mmap import os import errno import struct import secrets import types if os.name == "nt": import _winapi _USE_POSIX = False else: import _posixshmem _USE_POSIX = True from . 
import resource_tracker _O_CREX = os.O_CREAT | os.O_EXCL # FreeBSD (and perhaps other BSDs) limit names to 14 characters. _SHM_SAFE_NAME_LENGTH = 14 # Shared memory block name prefix if _USE_POSIX: _SHM_NAME_PREFIX = '/psm_' else: _SHM_NAME_PREFIX = 'wnsm_' def _make_filename(): "Create a random filename for the shared memory object." # number of random bytes to use for name nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 assert nbytes >= 2, '_SHM_NAME_PREFIX too long' name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) assert len(name) <= _SHM_SAFE_NAME_LENGTH return name class SharedMemory: """Creates a new shared memory block or attaches to an existing shared memory block. Every shared memory block is assigned a unique name. This enables one process to create a shared memory block with a particular name so that a different process can attach to that same shared memory block using that same name. As a resource for sharing data across processes, shared memory blocks may outlive the original process that created them. When one process no longer needs access to a shared memory block that might still be needed by other processes, the close() method should be called. When a shared memory block is no longer needed by any process, the unlink() method should be called to ensure proper cleanup.""" # Defaults; enables close() and unlink() to run without errors. _name = None _fd = -1 _mmap = None _buf = None _flags = os.O_RDWR _mode = 0o600 _prepend_leading_slash = True if _USE_POSIX else False def __init__(self, name=None, create=False, size=0): if not size >= 0: raise ValueError("'size' must be a positive integer") if create: self._flags = _O_CREX | os.O_RDWR if size == 0: raise ValueError("'size' must be a positive number different from zero") if name is None and not self._flags & os.O_EXCL: raise ValueError("'name' can only be None if create=True") if _USE_POSIX: # POSIX Shared Memory if name is None: while True: name = _make_filename() try: self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) except FileExistsError: continue self._name = name break else: name = "/" + name if self._prepend_leading_slash else name self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) self._name = name try: if create and size: os.ftruncate(self._fd, size) stats = os.fstat(self._fd) size = stats.st_size self._mmap = mmap.mmap(self._fd, size) except OSError: self.unlink() raise resource_tracker.register(self._name, "shared_memory") else: # Windows Named Shared Memory if create: while True: temp_name = _make_filename() if name is None else name # Create and reserve shared memory block with this name # until it can be attached to by mmap. h_map = _winapi.CreateFileMapping( _winapi.INVALID_HANDLE_VALUE, _winapi.NULL, _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF, size & 0xFFFFFFFF, temp_name ) try: last_error_code = _winapi.GetLastError() if last_error_code == _winapi.ERROR_ALREADY_EXISTS: if name is not None: raise FileExistsError( errno.EEXIST, os.strerror(errno.EEXIST), name, _winapi.ERROR_ALREADY_EXISTS ) else: continue self._mmap = mmap.mmap(-1, size, tagname=temp_name) finally: _winapi.CloseHandle(h_map) self._name = temp_name break else: self._name = name # Dynamically determine the existing named shared memory # block's size which is likely a multiple of mmap.PAGESIZE. 
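# --- Illustrative note (added; not part of the upstream module) ---
# A minimal sketch of creating a block in one place and attaching to it by
# name from another (the second handle could just as well live in a different
# process):
#
#     from multiprocess import shared_memory
#
#     shm = shared_memory.SharedMemory(create=True, size=16)
#     shm.buf[:5] = b'hello'
#     other = shared_memory.SharedMemory(name=shm.name)   # attach by name
#     bytes(other.buf[:5])                                 # b'hello'
#     other.close()
#     shm.close()
#     shm.unlink()                                         # call exactly once overall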
h_map = _winapi.OpenFileMapping( _winapi.FILE_MAP_READ, False, name ) try: p_buf = _winapi.MapViewOfFile( h_map, _winapi.FILE_MAP_READ, 0, 0, 0 ) finally: _winapi.CloseHandle(h_map) try: size = _winapi.VirtualQuerySize(p_buf) finally: _winapi.UnmapViewOfFile(p_buf) self._mmap = mmap.mmap(-1, size, tagname=name) self._size = size self._buf = memoryview(self._mmap) def __del__(self): try: self.close() except OSError: pass def __reduce__(self): return ( self.__class__, ( self.name, False, self.size, ), ) def __repr__(self): return f'{self.__class__.__name__}({self.name!r}, size={self.size})' @property def buf(self): "A memoryview of contents of the shared memory block." return self._buf @property def name(self): "Unique name that identifies the shared memory block." reported_name = self._name if _USE_POSIX and self._prepend_leading_slash: if self._name.startswith("/"): reported_name = self._name[1:] return reported_name @property def size(self): "Size in bytes." return self._size def close(self): """Closes access to the shared memory from this instance but does not destroy the shared memory block.""" if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() self._mmap = None if _USE_POSIX and self._fd >= 0: os.close(self._fd) self._fd = -1 def unlink(self): """Requests that the underlying shared memory block be destroyed. In order to ensure proper cleanup of resources, unlink should be called once (and only once) across all processes which have access to the shared memory block.""" if _USE_POSIX and self._name: _posixshmem.shm_unlink(self._name) resource_tracker.unregister(self._name, "shared_memory") _encoding = "utf8" class ShareableList: """Pattern for a mutable list-like object shareable via a shared memory block. It differs from the built-in list type in that these lists can not change their overall length (i.e. no append, insert, etc.) 
Because values are packed into a memoryview as bytes, the struct packing format for any storable value must require no more than 8 characters to describe its format.""" # The shared memory area is organized as follows: # - 8 bytes: number of items (N) as a 64-bit integer # - (N + 1) * 8 bytes: offsets of each element from the start of the # data area # - K bytes: the data area storing item values (with encoding and size # depending on their respective types) # - N * 8 bytes: `struct` format string for each element # - N bytes: index into _back_transforms_mapping for each element # (for reconstructing the corresponding Python value) _types_mapping = { int: "q", float: "d", bool: "xxxxxxx?", str: "%ds", bytes: "%ds", None.__class__: "xxxxxx?x", } _alignment = 8 _back_transforms_mapping = { 0: lambda value: value, # int, float, bool 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str 2: lambda value: value.rstrip(b'\x00'), # bytes 3: lambda _value: None, # None } @staticmethod def _extract_recreation_code(value): """Used in concert with _back_transforms_mapping to convert values into the appropriate Python objects when retrieving them from the list as well as when storing them.""" if not isinstance(value, (str, bytes, None.__class__)): return 0 elif isinstance(value, str): return 1 elif isinstance(value, bytes): return 2 else: return 3 # NoneType def __init__(self, sequence=None, *, name=None): if name is None or sequence is not None: sequence = sequence or () _formats = [ self._types_mapping[type(item)] if not isinstance(item, (str, bytes)) else self._types_mapping[type(item)] % ( self._alignment * (len(item) // self._alignment + 1), ) for item in sequence ] self._list_len = len(_formats) assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len offset = 0 # The offsets of each list element into the shared memory's # data area (0 meaning the start of the data area, not the start # of the shared memory area). self._allocated_offsets = [0] for fmt in _formats: offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) self._allocated_offsets.append(offset) _recreation_codes = [ self._extract_recreation_code(item) for item in sequence ] requested_size = struct.calcsize( "q" + self._format_size_metainfo + "".join(_formats) + self._format_packing_metainfo + self._format_back_transform_codes ) self.shm = SharedMemory(name, create=True, size=requested_size) else: self.shm = SharedMemory(name) if sequence is not None: _enc = _encoding struct.pack_into( "q" + self._format_size_metainfo, self.shm.buf, 0, self._list_len, *(self._allocated_offsets) ) struct.pack_into( "".join(_formats), self.shm.buf, self._offset_data_start, *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) ) struct.pack_into( self._format_packing_metainfo, self.shm.buf, self._offset_packing_formats, *(v.encode(_enc) for v in _formats) ) struct.pack_into( self._format_back_transform_codes, self.shm.buf, self._offset_back_transform_codes, *(_recreation_codes) ) else: self._list_len = len(self) # Obtains size from offset 0 in buffer. self._allocated_offsets = list( struct.unpack_from( self._format_size_metainfo, self.shm.buf, 1 * 8 ) ) def _get_packing_format(self, position): "Gets the packing format for a single value stored in the list." 
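# --- Illustrative note (added; not part of the upstream module) ---
# A minimal sketch of sharing a fixed-length list between two handles (or two
# processes) by name; values may be replaced in place as long as the new value
# fits the originally allocated slot:
#
#     from multiprocess import shared_memory
#
#     sl = shared_memory.ShareableList(['howdy', b'HoWdY', -273.154, 100, None, True])
#     other = shared_memory.ShareableList(name=sl.shm.name)   # attach elsewhere
#     other[0]                                                 # 'howdy'
#     other[0] = 'hi'                                          # same-size-or-smaller is fine
#     sl.shm.close(); other.shm.close(); sl.shm.unlink()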
position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") v = struct.unpack_from( "8s", self.shm.buf, self._offset_packing_formats + position * 8 )[0] fmt = v.rstrip(b'\x00') fmt_as_str = fmt.decode(_encoding) return fmt_as_str def _get_back_transform(self, position): "Gets the back transformation function for a single value." if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") transform_code = struct.unpack_from( "b", self.shm.buf, self._offset_back_transform_codes + position )[0] transform_function = self._back_transforms_mapping[transform_code] return transform_function def _set_packing_format_and_transform(self, position, fmt_as_str, value): """Sets the packing format and back transformation code for a single value in the list at the specified position.""" if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") struct.pack_into( "8s", self.shm.buf, self._offset_packing_formats + position * 8, fmt_as_str.encode(_encoding) ) transform_code = self._extract_recreation_code(value) struct.pack_into( "b", self.shm.buf, self._offset_back_transform_codes + position, transform_code ) def __getitem__(self, position): position = position if position >= 0 else position + self._list_len try: offset = self._offset_data_start + self._allocated_offsets[position] (v,) = struct.unpack_from( self._get_packing_format(position), self.shm.buf, offset ) except IndexError: raise IndexError("index out of range") back_transform = self._get_back_transform(position) v = back_transform(v) return v def __setitem__(self, position, value): position = position if position >= 0 else position + self._list_len try: item_offset = self._allocated_offsets[position] offset = self._offset_data_start + item_offset current_format = self._get_packing_format(position) except IndexError: raise IndexError("assignment index out of range") if not isinstance(value, (str, bytes)): new_format = self._types_mapping[type(value)] encoded_value = value else: allocated_length = self._allocated_offsets[position + 1] - item_offset encoded_value = (value.encode(_encoding) if isinstance(value, str) else value) if len(encoded_value) > allocated_length: raise ValueError("bytes/str item exceeds available storage") if current_format[-1] == "s": new_format = current_format else: new_format = self._types_mapping[str] % ( allocated_length, ) self._set_packing_format_and_transform( position, new_format, value ) struct.pack_into(new_format, self.shm.buf, offset, encoded_value) def __reduce__(self): return partial(self.__class__, name=self.shm.name), () def __len__(self): return struct.unpack_from("q", self.shm.buf, 0)[0] def __repr__(self): return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' @property def format(self): "The struct packing format used by all currently stored items." return "".join( self._get_packing_format(i) for i in range(self._list_len) ) @property def _format_size_metainfo(self): "The struct packing format used for the items' storage offsets." return "q" * (self._list_len + 1) @property def _format_packing_metainfo(self): "The struct packing format used for the items' packing formats." return "8s" * self._list_len @property def _format_back_transform_codes(self): "The struct packing format used for the items' back transforms." 
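# --- Illustrative note (added; not part of the upstream module) ---
# A worked example of the layout described above, for ShareableList([7, 'hi']):
# the int maps to format 'q' and the 2-byte string is padded up to '8s', so
#
#     bytes  0-7   item count            -> 2
#     bytes  8-31  (N + 1) offsets       -> 0, 8, 16
#     bytes 32-47  data area             -> 7 as 'q', b'hi' padded to 8 bytes
#     bytes 48-63  packing formats       -> b'q' and b'8s', each padded to 8 bytes
#     bytes 64-65  back-transform codes  -> 0 (int), 1 (str)
#
# which matches struct.calcsize('qqqqq8s8s8sbb') == 66 requested at creation.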
return "b" * self._list_len @property def _offset_data_start(self): # - 8 bytes for the list length # - (N + 1) * 8 bytes for the element offsets return (self._list_len + 2) * 8 @property def _offset_packing_formats(self): return self._offset_data_start + self._allocated_offsets[-1] @property def _offset_back_transform_codes(self): return self._offset_packing_formats + self._list_len * 8 def count(self, value): "L.count(value) -> integer -- return number of occurrences of value." return sum(value == entry for entry in self) def index(self, value): """L.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present.""" for position, entry in enumerate(self): if value == entry: return position else: raise ValueError(f"{value!r} not in this container") __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/sharedctypes.py000066400000000000000000000142421455552142400260560ustar00rootroot00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import ctypes import weakref from . import heap from . import get_context from .context import reduction, assert_spawning _ForkingPickler = reduction.ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=True, ctx=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): ''' Return a synchronization wrapper for a RawArray ''' obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def 
synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = {name: make_property(name) for name in names} classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, ctx) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length _ForkingPickler.register(type_, reduce_ctype) buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/spawn.py000066400000000000000000000221151455552142400245060ustar00rootroot00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import sys import runpy import types from . import get_start_method, set_start_method from . import process from .context import reduction from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
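# --- Illustrative note (added; not part of the upstream module) ---
# A minimal sketch of pointing spawned children at a different interpreter;
# the path is hypothetical, and set_executable()/set_start_method() are the
# top-level multiprocess entry points that feed get_command_line() below:
#
#     import multiprocess as mp
#
#     if __name__ == '__main__':
#         mp.set_start_method('spawn')
#         mp.set_executable('/opt/python3.10/bin/python3')   # hypothetical path
#         mp.Process(target=print, args=('hello',)).start()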
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = getattr(sys, 'frozen', False) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe def get_executable(): return _python_exe # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--multiprocessing-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv), "Not forking" if sys.platform == 'win32': import msvcrt import _winapi if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid) else: source_process = None new_handle = reduction.duplicate(pipe_handle, source_process=source_process) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process else: from . import resource_tracker resource_tracker._resource_tracker._fd = tracker_fd fd = pipe_handle parent_sentinel = os.dup(pipe_handle) exitcode = _main(fd, parent_sentinel) sys.exit(exitcode) def _main(fd, parent_sentinel): with os.fdopen(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = reduction.pickle.load(from_parent) prepare(preparation_data) self = reduction.pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap(parent_sentinel) def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... 
The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable.''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path=sys.path.copy() try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] main_mod_name = getattr(main_module.__spec__, "name", None) if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. 
We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/synchronize.py000066400000000000000000000270651455552142400257420ustar00rootroot00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import sys import tempfile try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import time from . import context from . import process from . import util # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
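# --- Illustrative note (added; not part of the upstream module) ---
# A minimal sketch of the usual way these primitives are consumed, via the
# top-level multiprocess API (which supplies the ctx argument for you):
#
#     import multiprocess as mp
#
#     def worker(lock, counter):
#         with lock:                       # Lock as defined below
#             counter.value += 1
#
#     if __name__ == '__main__':
#         lock = mp.Lock()
#         counter = mp.Value('i', 0)
#         procs = [mp.Process(target=worker, args=(lock, counter)) for _ in range(4)]
#         for p in procs: p.start()
#         for p in procs: p.join()
#         print(counter.value)             # 4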
# See issue 3770 try: from _multiprocess import SemLock, sem_unlink except ImportError: try: from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, *, ctx): if ctx is None: ctx = context._default_context.get_context() name = ctx.get_start_method() unlink_now = sys.platform == 'win32' or name == 'fork' for i in range(100): try: sl = self._semlock = _multiprocessing.SemLock( kind, value, maxvalue, self._make_name(), unlink_now) except FileExistsError: pass else: break else: raise FileExistsError('cannot find name for semaphore') util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if self._semlock.name is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .resource_tracker import register register(self._semlock.name, "semaphore") util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .resource_tracker import unregister sem_unlink(name) unregister(name, "semaphore") def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: h = sl.handle return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % \ (self.__class__.__name__, value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 
'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) # # Recursive lock # class RLock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None, *, ctx): self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def notify(self, n=1): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire( False), ('notify: Should not have been able to acquire ' + '_wait_semaphore') # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res, ('notify: Bug in sleeping_count.acquire' + '- res should not be False') sleepers = 0 while sleepers < n and self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def notify_all(self): self.notify(n=sys.maxsize) def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() 
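# --- Illustrative note (added; not part of the upstream module) ---
# A minimal sketch of wait_for() from the caller's side, polling shared state
# under the condition's lock (with dill-backed multiprocess the lambda
# predicate pickles; with a fork-based start method it would work regardless):
#
#     import multiprocess as mp
#
#     def consumer(cond, flag):
#         with cond:
#             cond.wait_for(lambda: flag.value == 1, timeout=10)
#
#     if __name__ == '__main__':
#         cond, flag = mp.Condition(), mp.Value('i', 0)
#         p = mp.Process(target=consumer, args=(cond, flag))
#         p.start()
#         with cond:
#             flag.value = 1
#             cond.notify()
#         p.join()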
return result # # Event # class Event(object): def __init__(self, *, ctx): self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False # # Barrier # class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, *, ctx): import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): self._array[1] = value uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/tests/000077500000000000000000000000001455552142400241455ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/tests/__init__.py000066400000000000000000006046571455552142400263000ustar00rootroot00000000000000# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import textwrap import time import io import itertools import sys import os import gc import errno import signal import array import socket import random import logging import subprocess import struct import operator import pickle #XXX: use dill? import weakref import warnings import test.support import test.support.script_helper from test import support from test.support import hashlib_helper from test.support import import_helper from test.support import os_helper from test.support import socket_helper from test.support import threading_helper from test.support import warnings_helper # Skip tests if _multiprocessing wasn't built. _multiprocessing = import_helper.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. import_helper.import_module('multiprocess.synchronize') import threading import multiprocess as multiprocessing import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocess import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None if hasattr(support,'check_sanitizer') and support.check_sanitizer(address=True): # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to # work around a libasan race condition: dead lock in pthread_create(). 
raise unittest.SkipTest("libasan has a pthread_create() dead lock") # Don't ignore user's installed packages ENV = dict(__cleanenv = False, __isolated = False) # Timeout to wait until a process completes #XXX: travis-ci TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused threading_helper.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocess import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocess.connection import wait def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." % nsems_min) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.monotonic() try: return self.func(*args, **kwds) finally: self.elapsed = time.monotonic() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. 
def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocess.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def _test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
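# --- Illustrative note (added; not part of the upstream test suite) ---
# The feature exercised here, multiprocess.parent_process(), can be seen
# directly with a minimal sketch:
#
#     import multiprocess as mp
#
#     def child():
#         pp = mp.parent_process()          # None when called in the main process
#         print(pp.pid, pp.is_alive())
#
#     if __name__ == '__main__':
#         p = mp.Process(target=child)
#         p.start()
#         p.join()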
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocess.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=support.SHORT_TIMEOUT) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. 
# We use alarm() to interrupt it if it blocks for too long. def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() travis = os.environ.get('COVERAGE') #XXX: travis-ci N = (1 if travis else 5) if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: p.start() for p in procs: join_process(p) for p in procs: 
self.assertEqual(p.exitcode, 0) procs = [self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.001) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() gc.collect() # For PyPy or other GCs. self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(os_helper.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) @classmethod def _sleep_and_set_event(self, evt, delay=0.0): 
time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, encoding="utf-8") as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("__init__.py", err) #self.assertIn("1/0 # MARKER", err) #FIXME @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, 1) with open(testfn, encoding="utf-8") 
as f: content = f.read() self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) cases = [ ((True,), 1), ((False,), 0), ((8,), 8), ((None,), 0), ((), 0), ] for args, expected in cases: with self.subTest(args=args): p = self.Process(target=sys.exit, args=args) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, expected) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) 
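        # Sketch of the Queue.get() semantics exercised here, assuming an
        # empty multiprocess Queue `q`:
        #     q.get(False)      # non-blocking: raises queue.Empty at once
        #     q.get(True, t)    # blocks up to ~t seconds, then raises queue.Empty
        # The TimingWrapper records how long each call took, so the elapsed
        # time can be compared against the requested timeout below.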
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with os_helper.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w', encoding="utf-8") as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with import_helper.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = time.monotonic() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = time.monotonic() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once # failed because the delta was only 135.8 ms. self.assertGreaterEqual(delta, 0.100) close_queue(q) def test_queue_feeder_donot_stop_onexc(self): # bpo-30414: verify feeder handles exceptions correctly if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): def __reduce__(self): raise AttributeError with test.support.captured_stderr(): q = self.Queue() q.put(NotSerializable()) q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) close_queue(q) with test.support.captured_stderr(): # bpo-33078: verify that the queue size is correctly handled # on errors. 
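            # (if the feeder thread drops an item it cannot pickle, it must
            # also roll back the queue's size accounting; otherwise this
            # maxsize=1 queue would appear permanently full and the second
            # put() below would block.)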
q = self.Queue(maxsize=1) q.put(NotSerializable()) q.put(True) try: self.assertEqual(q.qsize(), 1) except NotImplementedError: # qsize is not available on all platform as it # relies on sem_getvalue pass self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Check that the size of the queue is correct self.assertTrue(q.empty()) close_queue(q) def test_queue_feeder_on_queue_feeder_error(self): # bpo-30006: verify feeder handles exceptions using the # _on_queue_feeder_error hook. if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): """Mock unserializable object""" def __init__(self): self.reduce_was_called = False self.on_queue_feeder_error_was_called = False def __reduce__(self): self.reduce_was_called = True raise AttributeError class SafeQueue(multiprocessing.queues.Queue): """Queue with overloaded _on_queue_feeder_error hook""" @staticmethod def _on_queue_feeder_error(e, obj): if (isinstance(e, AttributeError) and isinstance(obj, NotSerializable)): obj.on_queue_feeder_error_was_called = True not_serializable_obj = NotSerializable() # The captured_stderr reduces the noise in the test report with test.support.captured_stderr(): q = SafeQueue(ctx=multiprocessing.get_context()) q.put(not_serializable_obj) # Verify that q is still functioning correctly q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Assert that the serialization and the hook have been called correctly self.assertTrue(not_serializable_obj.reduce_was_called) self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) def test_closed_queue_put_get_exceptions(self): for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): q.close() with self.assertRaisesRegex(ValueError, 'is closed'): q.put('foo') with self.assertRaisesRegex(ValueError, 'is closed'): q.get() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # 
self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def assertReachesEventually(self, func, value): for i in range(10): try: if func() == value: break except NotImplementedError: break time.sleep(DELTA) time.sleep(DELTA) self.assertReturnsIfImplemented(value, func) def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() 
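        # notify_all() has to be called with the condition held; every waiter
        # is woken and re-acquires the lock before returning from wait().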
cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.1 dt = time.monotonic() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = time.monotonic() - dt # borrow logic in assertTimeout() from test/lock_tests.py if not result and expected * 0.6 < dt < expected * 10.0: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
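        # (the child's wait_for(..., timeout=0.1) must therefore time out; it
        # reports success only if the timeout was observed within a sane window.)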
for i in range(3): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertTrue(success.value) @classmethod def _test_wait_result(cls, c, pid): with c: c.notify() time.sleep(1) if pid is not None: os.kill(pid, signal.SIGINT) def test_wait_result(self): if isinstance(self, ProcessesMixin) and sys.platform != 'win32': pid = os.getpid() else: pid = None c = self.Condition() with c: self.assertFalse(c.wait(0)) self.assertFalse(c.wait(0.1)) p = self.Process(target=self._test_wait_result, args=(c, pid)) p.start() self.assertTrue(c.wait(60)) if pid is not None: self.assertRaises(KeyboardInterrupt, c.wait, 60) p.join() class _TestEvent(BaseTestCase): @classmethod def _test_event(cls, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporarily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) p = self.Process(target=self._test_event, args=(event,)) p.daemon = True p.start() self.assertEqual(wait(), True) p.join() # # Tests for Barrier - adapted from tests in test/lock_tests.py # # Many of the tests for threading.Barrier use a list as an atomic # counter: a value is appended to increment the counter, and the # length of the list gives the value. We use the class DummyList # for the same purpose. class _DummyList(object): def __init__(self): wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) lock = multiprocessing.Lock() self.__setstate__((wrapper, lock)) self._lengthbuf[0] = 0 def __setstate__(self, state): (self._wrapper, self._lock) = state self._lengthbuf = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._wrapper, self._lock) def append(self, _): with self._lock: self._lengthbuf[0] += 1 def __len__(self): with self._lock: return self._lengthbuf[0] def _wait(): # A crude wait/yield function not relying on synchronization primitives. time.sleep(0.01) class Bunch(object): """ A bunch of threads. """ def __init__(self, namespace, f, args, n, wait_before_exit=False): """ Construct a bunch of `n` threads running the same function `f`. If `wait_before_exit` is True, the threads won't terminate until do_finish() is called. 
""" self.f = f self.args = args self.n = n self.started = namespace.DummyList() self.finished = namespace.DummyList() self._can_exit = namespace.Event() if not wait_before_exit: self._can_exit.set() threads = [] for i in range(n): p = namespace.Process(target=self.task) p.daemon = True p.start() threads.append(p) def finalize(threads): for p in threads: p.join() self._finalizer = weakref.finalize(self, finalize, threads) def task(self): pid = os.getpid() self.started.append(pid) try: self.f(*self.args) finally: self.finished.append(pid) self._can_exit.wait(30) assert self._can_exit.is_set() def wait_for_started(self): while len(self.started) < self.n: _wait() def wait_for_finished(self): while len(self.finished) < self.n: _wait() def do_finish(self): self._can_exit.set() def close(self): self._finalizer() class AppendTrue(object): def __init__(self, obj): self.obj = obj def __call__(self): self.obj.append(True) class _TestBarrier(BaseTestCase): """ Tests for Barrier objects. """ N = 5 defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout def setUp(self): self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) def tearDown(self): self.barrier.abort() self.barrier = None def DummyList(self): if self.TYPE == 'threads': return [] elif self.TYPE == 'manager': return self.manager.list() else: return _DummyList() def run_threads(self, f, args): b = Bunch(self, f, args, self.N-1) try: f(*args) b.wait_for_finished() finally: b.close() @classmethod def multipass(cls, barrier, results, n): m = barrier.parties assert m == cls.N for i in range(n): results[0].append(True) assert len(results[1]) == i * m barrier.wait() results[1].append(True) assert len(results[0]) == (i + 1) * m barrier.wait() try: assert barrier.n_waiting == 0 except NotImplementedError: pass assert not barrier.broken def test_barrier(self, passes=1): """ Test that a barrier is passed in lockstep """ results = [self.DummyList(), self.DummyList()] self.run_threads(self.multipass, (self.barrier, results, passes)) def test_barrier_10(self): """ Test that a barrier works for 10 consecutive runs """ return self.test_barrier(10) @classmethod def _test_wait_return_f(cls, barrier, queue): res = barrier.wait() queue.put(res) def test_wait_return(self): """ test the return value from barrier.wait """ queue = self.Queue() self.run_threads(self._test_wait_return_f, (self.barrier, queue)) results = [queue.get() for i in range(self.N)] self.assertEqual(results.count(0), 1) close_queue(queue) @classmethod def _test_action_f(cls, barrier, results): barrier.wait() if len(results) != 1: raise RuntimeError def test_action(self): """ Test the 'action' callback """ results = self.DummyList() barrier = self.Barrier(self.N, action=AppendTrue(results)) self.run_threads(self._test_action_f, (barrier, results)) self.assertEqual(len(results), 1) @classmethod def _test_abort_f(cls, barrier, results1, results2): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() def test_abort(self): """ Test that an abort will put the barrier in a broken state """ results1 = self.DummyList() results2 = self.DummyList() self.run_threads(self._test_abort_f, (self.barrier, results1, results2)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertTrue(self.barrier.broken) @classmethod def _test_reset_f(cls, barrier, results1, results2, results3): i = barrier.wait() if i == 
cls.N//2: # Wait until the other threads are all in the barrier. while barrier.n_waiting < cls.N-1: time.sleep(0.001) barrier.reset() else: try: barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) # Now, pass the barrier again barrier.wait() results3.append(True) def test_reset(self): """ Test that a 'reset' on a barrier frees the waiting threads """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() self.run_threads(self._test_reset_f, (self.barrier, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_abort_and_reset_f(cls, barrier, barrier2, results1, results2, results3): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() # Synchronize and reset the barrier. Must synchronize first so # that everyone has left it when we reset, and after so that no # one enters it before the reset. if barrier2.wait() == cls.N//2: barrier.reset() barrier2.wait() barrier.wait() results3.append(True) def test_abort_and_reset(self): """ Test that a barrier can be reset after being broken. """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() barrier2 = self.Barrier(self.N) self.run_threads(self._test_abort_and_reset_f, (self.barrier, barrier2, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_timeout_f(cls, barrier, results): i = barrier.wait() if i == cls.N//2: # One thread is late! time.sleep(1.0) try: barrier.wait(0.5) except threading.BrokenBarrierError: results.append(True) def test_timeout(self): """ Test wait(timeout) """ results = self.DummyList() self.run_threads(self._test_timeout_f, (self.barrier, results)) self.assertEqual(len(results), self.barrier.parties) @classmethod def _test_default_timeout_f(cls, barrier, results): i = barrier.wait(cls.defaultTimeout) if i == cls.N//2: # One thread is later than the default timeout time.sleep(1.0) try: barrier.wait() except threading.BrokenBarrierError: results.append(True) def test_default_timeout(self): """ Test the barrier's default timeout """ barrier = self.Barrier(self.N, timeout=0.5) results = self.DummyList() self.run_threads(self._test_default_timeout_f, (barrier, results)) self.assertEqual(len(results), barrier.parties) def test_single_thread(self): b = self.Barrier(1) b.wait() b.wait() @classmethod def _test_thousand_f(cls, barrier, passes, conn, lock): for i in range(passes): barrier.wait() with lock: conn.send(i) def test_thousand(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) passes = 1000 lock = self.Lock() conn, child_conn = self.Pipe(False) for j in range(self.N): p = self.Process(target=self._test_thousand_f, args=(self.barrier, passes, child_conn, lock)) p.start() self.addCleanup(p.join) for i in range(passes): for j in range(self.N): self.assertEqual(conn.recv(), i) # # # class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('q', 2 ** 33, 2 ** 34), ('c', latin('x'), latin('y')) ] def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _test(cls, values): for sv, cv 
in zip(values, cls.codes_values): sv.value = cv[2] def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.daemon = True proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) def test_rawvalue(self): self.test_value(raw=True) def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() obj1 = val1.get_obj() val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() obj2 = val2.get_obj() lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() obj3 = val3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def f(cls, seq): for i in range(1, len(seq)): seq[i] += seq[i-1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.daemon = True p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_array_from_size(self): size = 10 # Test for zeroing (see issue #11675). # The repetition below strengthens the test by increasing the chances # of previously allocated non-zero memory being used for the new array # on the 2nd and 3rd loops. 
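        # (constructing Array('i', size) from a bare integer must yield `size`
        # zero-initialised elements, which is what each pass below asserts.)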
for _ in range(3): arr = self.Array('i', size) self.assertEqual(len(arr), size) self.assertEqual(list(arr), [0] * size) arr[:] = range(10) self.assertEqual(list(arr), list(range(10))) del arr @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', list(range(10))) lock1 = arr1.get_lock() obj1 = arr1.get_obj() arr2 = self.Array('i', list(range(10)), lock=None) lock2 = arr2.get_lock() obj2 = arr2.get_obj() lock = self.Lock() arr3 = self.Array('i', list(range(10)), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) # # # class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(list(range(10))) self.assertEqual(a[:], list(range(10))) b = self.list() self.assertEqual(b[:], []) b.extend(list(range(5))) self.assertEqual(b[:], list(range(5))) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2,3,4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], list(range(10))) d = [a, b] e = self.list(d) self.assertEqual( [element[:] for element in e], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) def test_list_iter(self): a = self.list(list(range(10))) it = iter(a) self.assertEqual(list(it), list(range(10))) self.assertEqual(list(it), []) # exhausted # list modified during iteration it = iter(a) a[0] = 100 self.assertEqual(next(it), 100) def test_list_proxy_in_list(self): a = self.list([self.list(range(3)) for _i in range(3)]) self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) a[0][-1] = 55 self.assertEqual(a[0][:], [0, 1, 55]) for i in range(1, 3): self.assertEqual(a[i][:], [0, 1, 2]) self.assertEqual(a[1].pop(), 2) self.assertEqual(len(a[1]), 2) for i in range(0, 3, 2): self.assertEqual(len(a[i]), 3) del a b = self.list() b.append(b) del b def test_dict(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) def test_dict_iter(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) it = iter(d) self.assertEqual(list(it), indices) self.assertEqual(list(it), []) # exhausted # dictionary changed size during iteration it = iter(d) d.clear() self.assertRaises(RuntimeError, next, it) def test_dict_proxy_nested(self): pets = self.dict(ferrets=2, hamsters=4) supplies = self.dict(water=10, feed=3) d = self.dict(pets=pets, supplies=supplies) self.assertEqual(supplies['water'], 10) self.assertEqual(d['supplies']['water'], 10) d['supplies']['blankets'] = 5 self.assertEqual(supplies['blankets'], 5) self.assertEqual(d['supplies']['blankets'], 5) d['supplies']['water'] = 7 self.assertEqual(supplies['water'], 7) 
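        # (both views alias the same managed dict, so a write made through
        # either proxy is observed through the other.)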
self.assertEqual(d['supplies']['water'], 7) del pets del supplies self.assertEqual(d['pets']['ferrets'], 2) d['supplies']['blankets'] = 11 self.assertEqual(d['supplies']['blankets'], 11) pets = d['pets'] supplies = d['supplies'] supplies['water'] = 7 self.assertEqual(supplies['water'], 7) self.assertEqual(d['supplies']['water'], 7) d.clear() self.assertEqual(len(d), 0) self.assertEqual(supplies['water'], 7) self.assertEqual(pets['hamsters'], 4) l = self.list([pets, supplies]) l[0]['marmots'] = 1 self.assertEqual(pets['marmots'], 1) self.assertEqual(l[0]['marmots'], 1) del pets del supplies self.assertEqual(l[0]['marmots'], 1) outer = self.list([[88, 99], l]) self.assertIsInstance(outer[0], list) # Not a ListProxy self.assertEqual(outer[-1][-1]['feed'], 3) def test_nested_queue(self): a = self.list() # Test queue inside list a.append(self.Queue()) a[0].put(123) self.assertEqual(a[0].get(), 123) b = self.dict() # Test queue inside dict b[0] = self.Queue() b[0].put(456) self.assertEqual(b[0].get(), 456) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) # # # def sqr(x, wait=0.0): time.sleep(wait) return x*x def mul(x, y): return x*y def raise_large_valuerror(wait): time.sleep(wait) raise ValueError("x" * 1024**2) def identity(x): return x class CountedObject(object): n_instances = 0 def __new__(cls): cls.n_instances += 1 return object.__new__(cls) def __del__(self): type(self).n_instances -= 1 class SayWhenError(ValueError): pass def exception_throwing_generator(total, when): if when == -1: raise SayWhenError("Somebody said when") for i in range(total): if i == when: raise SayWhenError("Somebody said when") yield i class _TestPool(BaseTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.pool = cls.Pool(4) @classmethod def tearDownClass(cls): cls.pool.terminate() cls.pool.join() cls.pool = None super().tearDownClass() def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), list(map(sqr, list(range(100))))) def test_starmap(self): psmap = self.pool.starmap tuples = list(zip(range(10), range(9,-1, -1))) self.assertEqual(psmap(mul, tuples), list(itertools.starmap(mul, tuples))) tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(psmap(mul, tuples, chunksize=20), list(itertools.starmap(mul, tuples))) def test_starmap_async(self): tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(self.pool.starmap_async(mul, tuples).get(), list(itertools.starmap(mul, tuples))) def test_map_async(self): self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), list(map(sqr, list(range(10))))) def test_map_async_callbacks(self): call_args = self.manager.list() if self.TYPE == 'manager' else [] self.pool.map_async(int, ['1'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(1, len(call_args)) self.assertEqual([1], call_args[0]) self.pool.map_async(int, ['a'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(2, len(call_args)) self.assertIsInstance(call_args[1], ValueError) def 
test_map_unplicklable(self): # Issue #19425 -- failure to pickle should not cause a hang if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class A(object): def __reduce__(self): raise RuntimeError('cannot pickle') with self.assertRaises(RuntimeError): self.pool.map(sqr, [A()]*10) def test_map_chunksize(self): try: self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) except multiprocessing.TimeoutError: self.fail("pool.map_async with chunksize stalled on null list") def test_map_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) # again, make sure it's reentrant with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(10, 3), 1) class SpecialIterable: def __iter__(self): return self def __next__(self): raise SayWhenError def __len__(self): return 1 with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) get = TimingWrapper(res.get) self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, list(range(10))) self.assertEqual(list(it), list(map(sqr, list(range(10))))) it = self.pool.imap(sqr, list(range(10))) for i in range(10): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) it = self.pool.imap(sqr, list(range(1000)), chunksize=100) for i in range(1000): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) def test_imap_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) for i in range(3): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) # SayWhenError seen at start of problematic chunk's results it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) for i in range(6): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) for i in range(4): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, list(range(10))) self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) def test_imap_unordered_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for 
{}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap_unordered(sqr, exception_throwing_generator(10, 3), 1) expected_values = list(map(sqr, list(range(10)))) with self.assertRaises(SayWhenError): # imap_unordered makes it difficult to anticipate the SayWhenError for i in range(10): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) it = self.pool.imap_unordered(sqr, exception_throwing_generator(20, 7), 2) expected_values = list(map(sqr, list(range(20)))) with self.assertRaises(SayWhenError): for i in range(20): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) def test_make_pool(self): expected_error = (RemoteError if self.TYPE == 'manager' else ValueError) self.assertRaises(expected_error, self.Pool, -1) self.assertRaises(expected_error, self.Pool, 0) if self.TYPE != 'manager': p = self.Pool(3) try: self.assertEqual(3, len(p._pool)) finally: p.close() p.join() def test_terminate(self): result = self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() # Sanity check the pool didn't wait for all tasks to finish self.assertLess(join.elapsed, 2.0) def test_empty_iterable(self): # See Issue 12157 p = self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) p.close() p.join() def test_context(self): if self.TYPE == 'processes': L = list(range(10)) expected = [sqr(i) for i in L] with self.Pool(2) as p: r = p.map_async(sqr, L) self.assertEqual(r.get(), expected) p.join() self.assertRaises(ValueError, p.map_async, sqr, L) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_traceback(self): # We want ensure that the traceback from the child process is # contained in the traceback raised in the main process. 
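        # (the pool wraps the worker's formatted traceback in a RemoteTraceback
        # and chains it to the re-raised exception via __cause__, which is what
        # the assertions below check.)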
if self.TYPE == 'processes': with self.Pool(1) as p: try: p.apply(self._test_traceback) except Exception as e: exc = e else: self.fail('expected RuntimeError') p.join() self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with test.support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) # _helper_reraises_exception should not make the error # a remote exception with self.Pool(1) as p: try: p.map(sqr, exception_throwing_generator(1, -1), 1) except Exception as e: exc = e else: self.fail('expected SayWhenError') self.assertIs(type(exc), SayWhenError) self.assertIs(exc.__cause__, None) p.join() @classmethod def _test_wrapped_exception(cls): raise RuntimeError('foo') @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_wrapped_exception(self): # Issue #20980: Should not wrap exception when using thread pool with self.Pool(1) as p: with self.assertRaises(RuntimeError): p.apply(self._test_wrapped_exception) p.join() def test_map_no_failfast(self): # Issue #23992: the fail-fast behaviour when an exception is raised # during map() would make Pool.join() deadlock, because a worker # process would fill the result queue (after the result handler thread # terminated, hence not draining it anymore). t_start = time.monotonic() with self.assertRaises(ValueError): with self.Pool(2) as p: try: p.map(raise_large_valuerror, [0, 1]) finally: time.sleep(0.5) p.close() p.join() # check that we indeed waited for all jobs self.assertGreater(time.monotonic() - t_start, 0.9) def test_release_task_refs(self): # Issue #29861: task arguments and results should not be kept # alive after we are done with them. objs = [CountedObject() for i in range(10)] refs = [weakref.ref(o) for o in objs] self.pool.map(identity, objs) del objs gc.collect() # For PyPy or other GCs. time.sleep(DELTA) # let threaded cleanup code run self.assertEqual(set(wr() for wr in refs), {None}) # With a process pool, copies of the objects are returned, check # they were released too. 
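        # (CountedObject tracks live instances in __new__/__del__, so the class
        # counter returning to zero means the unpickled result copies have been
        # released as well.)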
self.assertEqual(CountedObject.n_instances, 0) def test_enter(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) with pool: pass # call pool.terminate() # pool is no longer running with self.assertRaises(ValueError): # bpo-35477: pool.__enter__() fails if the pool is not running with pool: pass pool.join() def test_resource_warning(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) pool.terminate() pool.join() # force state to RUN to emit ResourceWarning in __del__() pool._state = multiprocessing.pool.RUN with warnings_helper.check_warnings( ('unclosed running multiprocessing pool', ResourceWarning)): pool = None support.gc_collect() def raising(): raise KeyError("key") def unpickleable_result(): return lambda: 42 class _TestPoolWorkerErrors(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_async_error_callback(self): p = multiprocessing.Pool(2) scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(raising, error_callback=errback) self.assertRaises(KeyError, res.get) self.assertTrue(scratchpad[0]) self.assertIsInstance(scratchpad[0], KeyError) p.close() p.join() def _test_unpickleable_result(self): from multiprocess.pool import MaybeEncodingError p = multiprocessing.Pool(2) # Make sure we don't lose pool processes because of encoding errors. for iteration in range(20): scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(unpickleable_result, error_callback=errback) self.assertRaises(MaybeEncodingError, res.get) wrapped = scratchpad[0] self.assertTrue(wrapped) self.assertIsInstance(scratchpad[0], MaybeEncodingError) self.assertIsNotNone(wrapped.exc) self.assertIsNotNone(wrapped.value) p.close() p.join() class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): p = multiprocessing.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive # (countdown * DELTA = 5 seconds max startup process time) countdown = 50 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [w.pid for w in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() def test_pool_worker_lifetime_early_close(self): # Issue #10332: closing a pool whose workers have limited lifetimes # before all the tasks completed would make join() hang. 
p = multiprocessing.Pool(3, maxtasksperchild=1) results = [] for i in range(6): results.append(p.apply_async(sqr, (i, 0.3))) p.close() p.join() # check the results for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) def test_pool_maxtasksperchild_invalid(self): for value in [0, -1, 0.5, "12"]: with self.assertRaises(ValueError): multiprocessing.Pool(3, maxtasksperchild=value) def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): # tests cases against bpo-38744 and bpo-39360 cmd = '''if 1: from multiprocess import Pool problem = None class A: def __init__(self): self.pool = Pool(processes=1) def test(): global problem problem = A() problem.pool.map(float, tuple(range(10))) if __name__ == "__main__": test() ''' rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) self.assertEqual(rc, 0) # # Test of creating a customized manager class # from multiprocess.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in range(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('__next__',) def __iter__(self): return self def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() self.common(manager) manager.shutdown() # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context(self): with MyManager() as manager: self.common(manager) # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. 
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context_prestarted(self): manager = MyManager() manager.start() with manager: self.common(manager) self.assertEqual(manager._process.exitcode, 0) def common(self, manager): foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = pyqueue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) values = ['hello world', None, True, 2.25, 'hall\xe5 v\xe4rlden', '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', b'hall\xe5 v\xe4rlden', ] result = values[:] @classmethod def _putter(cls, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple queue.put(tuple(cls.values)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER ) manager.start() self.addCleanup(manager.shutdown) p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() self.assertEqual(queue.get(), self.result) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue @hashlib_helper.requires_hashdigest('md5') class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER) try: srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. 
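# Illustrative sketch, not part of the test suite: the manager tests above
# follow the standard BaseManager split, where the serving side registers a
# typeid together with a callable and calls start(), while a client side
# registers the same typeid without a callable and calls connect(). A
# minimal, hedged example; the names QueueServer, QueueClient,
# _get_shared_queue and the authkey value are invented for illustration.
import queue
from multiprocess.managers import BaseManager

_shared_queue = queue.Queue()

def _get_shared_queue():
    return _shared_queue

class QueueServer(BaseManager):
    pass

QueueServer.register('get_queue', callable=_get_shared_queue)

class QueueClient(BaseManager):
    pass

QueueClient.register('get_queue')    # no callable: proxy-only registration

if __name__ == '__main__':
    server = QueueServer(address=('127.0.0.1', 0), authkey=b'secret')
    server.start()                    # serves proxies from a child process
    client = QueueClient(address=server.address, authkey=b'secret')
    client.connect()                  # attaches to the already-running server
    q = client.get_queue()
    q.put('hello')
    assert q.get() == 'hello'
    server.shutdown()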
srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() p.join() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue finally: if hasattr(manager, "shutdown"): manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) try: manager.start() self.addCleanup(manager.shutdown) except OSError as e: if e.errno != errno.EADDRINUSE: raise # Retry after some time, in case the old socket was lingering # (sporadic failure on buildbots) time.sleep(1.0) manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) if hasattr(manager, "shutdown"): self.addCleanup(manager.shutdown) # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', list(range(4))) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocessing.BufferTooShort as e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(-1), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) time.sleep(.1) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(OSError, reader.send, 2) self.assertRaises(OSError, writer.recv) self.assertRaises(OSError, writer.poll) def test_spawn_close(self): # We 
test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() self.addCleanup(os_helper.unlink, os_helper.TESTFN) with open(os_helper.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) reduction.send_handle(conn, fd, p.pid) p.join() with open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(os_helper.unlink, os_helper.TESTFN) with open(os_helper.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") 
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. 
conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @hashlib_helper.requires_hashdigest('md5') class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=support.LONG_TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((socket_helper.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = 
lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). 
# Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") @hashlib_helper.requires_hashdigest('md5') class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def _new_shm_name(self, prefix): # Add a PID to the name of a POSIX shared memory object to allow # running multiprocessing tests (test_multiprocessing_fork, # test_multiprocessing_spawn, etc) in parallel. return prefix + str(os.getpid()) def test_shared_memory_basics(self): name_tsmb = self._new_shm_name('test01_tsmb') sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, name_tsmb) self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Verify __repr__ self.assertIn(sms.name, str(sms)) self.assertIn(str(sms.size), str(sms)) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory(name_tsmb) self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. 
same_sms.close() # Creating Shared Memory Segment with -ve size with self.assertRaises(ValueError): shared_memory.SharedMemory(create=True, size=-2) # Attaching Shared Memory Segment without a name with self.assertRaises(ValueError): shared_memory.SharedMemory(create=False) # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. name_dblunlink = self._new_shm_name('test01_dblunlink') sms_uno = shared_memory.SharedMemory( name_dblunlink, create=True, size=5000 ) with self.assertRaises(FileNotFoundError): try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory(name_dblunlink) sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( name_tsmb, create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. 
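# Illustrative sketch, not part of the test suite: the assertions above
# exercise the shared_memory.SharedMemory lifecycle, in which one process
# creates a named block, any process may attach to it by name, every handle
# is closed, and the block is unlinked exactly once. A minimal, hedged
# example with an OS-chosen block name:
from multiprocess import shared_memory

creator = shared_memory.SharedMemory(create=True, size=16)
creator.buf[:5] = b'hello'
attached = shared_memory.SharedMemory(name=creator.name)  # attach by name
assert bytes(attached.buf[:5]) == b'hello'
attached.close()     # every handle gets closed ...
creator.close()
creator.unlink()     # ... but only the owner unlinks, and only once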
sms.close() @unittest.skipIf(True, "fails with dill >= 0.3.5") def test_shared_memory_recreate(self): # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) def test_invalid_shared_memory_cration(self): # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_pickle_unpickle(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) sms.buf[0:6] = b'pickle' # Test pickling pickled_sms = pickle.dumps(sms, protocol=proto) # Test unpickling sms2 = pickle.loads(pickled_sms) self.assertIsInstance(sms2, shared_memory.SharedMemory) self.assertEqual(sms.name, sms2.name) self.assertEqual(bytes(sms.buf[0:6]), b'pickle') self.assertEqual(bytes(sms2.buf[0:6]), b'pickle') # Test that unpickled version is still the same SharedMemory sms.buf[0:6] = b'newval' self.assertEqual(bytes(sms.buf[0:6]), b'newval') self.assertEqual(bytes(sms2.buf[0:6]), b'newval') sms2.buf[0:6] = b'oldval' self.assertEqual(bytes(sms.buf[0:6]), b'oldval') self.assertEqual(bytes(sms2.buf[0:6]), b'oldval') def test_shared_memory_pickle_unpickle_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) sms.buf[0:6] = b'pickle' pickled_sms = pickle.dumps(sms, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sms.close() sms.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(pickled_sms) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocessing tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. 
p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' #XXX: ensure correct resource_tracker rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. 
absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify __repr__ self.assertIn(sl.shm.name, str(sl)) self.assertIn(str(list(sl)), str(sl)) # Index Out of Range (get) with self.assertRaises(IndexError): sl[7] # Index Out of Range (set) with self.assertRaises(IndexError): sl[7] = 2 # Assign value without format change (str -> str) current_format = sl._get_packing_format(0) sl[0] = 'howdy' self.assertEqual(current_format, sl._get_packing_format(0)) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. name_duplicate = self._new_shm_name('test03_duplicate') sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual(name_duplicate, sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. 
empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl, protocol=proto) deserialized_sl = pickle.loads(serialized_sl) self.assertIsInstance( deserialized_sl, shared_memory.ShareableList) self.assertEqual(deserialized_sl[-1], 9) self.assertIsNot(sl, deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") sl[3] = "newvalue" self.assertEqual(deserialized_sl[3], "newvalue") larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto) self.assertEqual(len(serialized_sl), len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_ShareableList_pickling_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) serialized_sl = pickle.dumps(sl, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sl.shm.close() sl.shm.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(serialized_sl) def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. p.terminate() p.wait() deadline = time.monotonic() + support.LONG_TIMEOUT t = 0.1 while time.monotonic() < deadline: time.sleep(t) t = min(t*2, 5) try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: raise AssertionError("A SharedMemory segment was leaked after" " a process was abruptly terminated.") if os.name == 'posix': # Without this line it was raising warnings like: # UserWarning: resource_tracker: # There appear to be 1 leaked shared_memory # objects to clean up at shutdown # See: https://bugs.python.org/issue45209 resource_tracker.unregister(f"/{name}", "shared_memory") # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # Test to verify that `Finalize` works. # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): gc.collect() # For PyPy or other GCs. 
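# Illustrative sketch, not part of the test suite: the surrounding
# _TestFinalize case registers cleanup callbacks through util.Finalize,
# which fire when the tracked object is garbage collected (or, when an
# exitpriority is given, at interpreter shutdown in priority order). A
# minimal, hedged example; the names Resource and messages are invented.
import gc
from multiprocess import util

class Resource:
    pass

messages = []
res = Resource()
util.Finalize(res, messages.append, args=('resource released',))
del res
gc.collect()          # dropping the last reference triggers the callback
assert messages == ['resource released']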
self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a gc.collect() # For PyPy or other GCs. b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called gc.collect() # For PyPy or other GCs. c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with threading_helper.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' 
+ m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): pass finally: # Hack private attribute _handle to avoid printing an error # in conn.__del__ conn._handle = None 
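# Illustrative sketch, not part of the test suite: _TestLogging above drives
# multiprocess.get_logger(), the library-wide logger whose effective level
# the _test_level child reports back to the parent. A minimal, hedged
# example of switching that logging on for debugging:
import logging
import multiprocess as mp

logger = mp.log_to_stderr()       # get_logger() plus a stderr handler
logger.setLevel(logging.INFO)
logger.info('process lifecycle events will now be logged to stderr')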
self.assertRaises((ValueError, OSError), multiprocessing.connection.Connection, -1) @hashlib_helper.requires_hashdigest('md5') class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection.CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') # # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 @hashlib_helper.requires_hashdigest('md5') class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() self.mgr.join() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() m.join() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocessing.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random()*0.1) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random()*0.1) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.create_server((socket_helper.HOST, 0)) addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) def test_wait_timeout(self): from multiprocess.connection import wait expected = 5 a, b = multiprocessing.Pipe() start = time.monotonic() res = wait([a, b], expected) delta = time.monotonic() - start self.assertEqual(res, []) self.assertLess(delta, expected * 2) self.assertGreater(delta, expected * 0.5) b.send(None) start = time.monotonic() res = wait([a, b], 20) delta = time.monotonic() - start self.assertEqual(res, [a]) self.assertLess(delta, 0.4) 
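# Illustrative sketch, not part of the test suite: TestWait above exercises
# multiprocess.connection.wait(), which blocks on a list of Connection
# objects (or process sentinels) until at least one is readable or the
# optional timeout elapses, and returns the ready subset (an empty list on
# timeout). A minimal, hedged example with two pipes:
from multiprocess import Pipe
from multiprocess.connection import wait

r1, w1 = Pipe(duplex=False)
r2, w2 = Pipe(duplex=False)
w2.send('ping')
assert wait([r1, r2], timeout=1.0) == [r2]   # only r2 has buffered data
assert r2.recv() == 'ping'
assert wait([r1], timeout=0.1) == []         # timeout: nothing ready
for end in (r1, w1, r2, w2):
    end.close()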
@classmethod def signal_and_sleep(cls, sem, period): sem.release() time.sleep(period) def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocessing.Semaphore(0) a, b = multiprocessing.Pipe() p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) self.assertTrue(sem.acquire(timeout=20)) start = time.monotonic() res = wait([a, p.sentinel, b], expected + 20) delta = time.monotonic() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocessing.Pipe() t = time.monotonic() res = wait([a], timeout=-1) t = time.monotonic() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls): import json r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json # start child process using unusual flags prog = ('from multiprocess.tests import TestFlags; ' + 'TestFlags.run_in_child()') data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() 
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. @classmethod def child(cls, n, conn): if n > 1: p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() join_process(p) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocessing.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocessing.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() join_process(p) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocessing.Pipe() fd = self.get_high_socket_fd() try: p = multiprocessing.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() join_process(p) finally: self.close(fd) writer.close() reader.close() if multiprocessing.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) 
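# At this point the child is blocked in send_bytes() writing CONN_MAX_SIZE
# bytes (chosen to exceed the OS pipe buffer, so the write blocks). The
# SIGUSR1 sent above interrupts that blocking write; because EINTR is
# ignored, the write is resumed and the full payload still arrives below.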
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, ctx.get_start_method()) def test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocessing.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def test_set_get(self): multiprocessing.set_forkserver_preload(PRELOAD) count = 0 old_method = multiprocessing.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocessing.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocessing.get_start_method(), method) ctx = multiprocessing.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocessing) count += 1 finally: multiprocessing.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocessing.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['spawn', 'fork'] or methods == ['fork', 'spawn', 'forkserver'] or methods == ['spawn', 'fork', 'forkserver']) def test_preload_resources(self): if multiprocessing.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestResourceTracker(unittest.TestCase): def _test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os, tempfile import multiprocess as mp from multiprocess import resource_tracker from multiprocess.shared_memory import SharedMemory mp.set_start_method("spawn") rand = tempfile._RandomNameSequence() def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() 
return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() deadline = time.monotonic() + support.LONG_TIMEOUT while time.monotonic() < deadline: time.sleep(.5) try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, but OSX seems to give # EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break else: raise AssertionError( f"A {rtype} resource was leaked after a process was " f"abruptly terminated.") err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) def check_resource_tracker_death(self, signum, should_die): # bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) support.wait_process(pid, exitcode=-signal.SIGKILL) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. 
reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) def test_too_long_name_resource(self): # gh-96819: Resource names that will make the length of a write to a pipe # greater than PIPE_BUF are not allowed rtype = "shared_memory" too_long_name_resource = "a" * (512 - len(rtype)) with self.assertRaises(ValueError): resource_tracker.register(too_long_name_resource, rtype) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() def test_close(self): queue = multiprocessing.SimpleQueue() queue.close() # closing a queue twice should not fail queue.close() # Test specific to CPython since it tests private attributes @test.support.cpython_only def test_closed(self): queue = multiprocessing.SimpleQueue() queue.close() self.assertTrue(queue._reader.closed) self.assertTrue(queue._writer.closed) class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: During pool creation, if we can't create a process, # don't leak already created ones. will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) @hashlib_helper.requires_hashdigest('md5') class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. 
def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert o[1] == 6 """ manager_class = multiprocessing.managers.SyncManager def setUp(self): self.manager = self.manager_class() self.manager.start() self.proc = None def tearDown(self): if self.proc is not None and self.proc.is_alive(): self.proc.terminate() self.proc.join() self.manager.shutdown() self.manager = None self.proc = None @classmethod def setUpClass(cls): support.reap_children() tearDownClass = setUpClass def wait_proc_exit(self): # Only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395). join_process(self.proc) start_time = time.monotonic() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = time.monotonic() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) self.run_worker(self._test_pool, o) @classmethod def _test_queue(cls, obj): assert obj.qsize() == 2 assert obj.full() assert not obj.empty() assert obj.get() == 5 assert not obj.empty() assert obj.get() == 6 assert obj.empty() def test_queue(self, qname="Queue"): o = getattr(self.manager, qname)(2) o.put(5) o.put(6) self.run_worker(self._test_queue, o) assert o.empty() assert not o.full() def test_joinable_queue(self): self.test_queue("JoinableQueue") @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.count(5) == 1 assert obj.index(5) == 0 obj.sort() obj.reverse() for x in obj: pass assert len(obj) == 1 assert obj.pop(0) == 5 def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_dict(cls, obj): assert len(obj) == 1 assert obj['foo'] 
== 5 assert obj.get('foo') == 5 assert list(obj.items()) == [('foo', 5)] assert list(obj.keys()) == ['foo'] assert list(obj.values()) == [5] assert obj.copy() == {'foo': 5} assert obj.popitem() == ('foo', 5) def test_dict(self): o = self.manager.dict() o['foo'] = 5 self.run_worker(self._test_dict, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_value(cls, obj): assert obj.value == 1 assert obj.get() == 1 obj.set(2) def test_value(self): o = self.manager.Value('i', 1) self.run_worker(self._test_value, o) self.assertEqual(o.value, 2) self.assertEqual(o.get(), 2) @classmethod def _test_array(cls, obj): assert obj[0] == 0 assert obj[1] == 1 assert len(obj) == 2 assert list(obj) == [0, 1] def test_array(self): o = self.manager.Array('i', [0, 1]) self.run_worker(self._test_array, o) @classmethod def _test_namespace(cls, obj): assert obj.x == 0 assert obj.y == 1 def test_namespace(self): o = self.manager.Namespace() o.x = 0 o.y = 1 self.run_worker(self._test_namespace, o) class TestNamedResource(unittest.TestCase): @unittest.skipIf(sys.hexversion <= 0x30a05f0, "SemLock subclass") def test_global_named_resource_spawn(self): # # gh-90549: Check that global named resources in main module # will not leak by a subprocess, in spawn context. # testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) with open(testfn, 'w', encoding='utf-8') as f: f.write(textwrap.dedent('''\ import multiprocess as mp ctx = mp.get_context('spawn') global_resource = ctx.Semaphore() def submain(): pass if __name__ == '__main__': p = ctx.Process(target=submain) p.start() p.join() ''')) rc, out, err = test.support.script_helper.assert_python_ok(testfn, **ENV) # on error, err = 'UserWarning: resource_tracker: There appear to # be 1 leaked semaphore objects to clean up at shutdown' self.assertEqual(err, b'') class MiscTestCase(unittest.TestCase): def test__all__(self): # Just make sure names in not_exported are excluded support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, not_exported=['SUBDEBUG', 'SUBWARNING', 'license', 'citation']) # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocessing.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. 
test.support.gc_collect() processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) if processes: test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocessing.Process connection = multiprocessing.connection current_process = staticmethod(multiprocessing.current_process) parent_process = staticmethod(multiprocessing.parent_process) active_children = staticmethod(multiprocessing.active_children) Pool = staticmethod(multiprocessing.Pool) Pipe = staticmethod(multiprocessing.Pipe) Queue = staticmethod(multiprocessing.Queue) JoinableQueue = staticmethod(multiprocessing.JoinableQueue) Lock = staticmethod(multiprocessing.Lock) RLock = staticmethod(multiprocessing.RLock) Semaphore = staticmethod(multiprocessing.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) Condition = staticmethod(multiprocessing.Condition) Event = staticmethod(multiprocessing.Event) Barrier = staticmethod(multiprocessing.Barrier) Value = staticmethod(multiprocessing.Value) Array = staticmethod(multiprocessing.Array) RawValue = staticmethod(multiprocessing.RawValue) RawArray = staticmethod(multiprocessing.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocessing.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocessing.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) start_time = time.monotonic() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = time.monotonic() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. 
test.support.environment_altered = True support.print_warning('Shared objects which still exist ' 'at manager shutdown:') support.print_warning(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocessing.dummy.Process connection = multiprocessing.dummy.connection current_process = staticmethod(multiprocessing.dummy.current_process) active_children = staticmethod(multiprocessing.dummy.active_children) Pool = staticmethod(multiprocessing.dummy.Pool) Pipe = staticmethod(multiprocessing.dummy.Pipe) Queue = staticmethod(multiprocessing.dummy.Queue) JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) Lock = staticmethod(multiprocessing.dummy.Lock) RLock = staticmethod(multiprocessing.dummy.RLock) Semaphore = staticmethod(multiprocessing.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) Condition = staticmethod(multiprocessing.dummy.Condition) Event = staticmethod(multiprocessing.dummy.Event) Barrier = staticmethod(multiprocessing.dummy.Barrier) Value = staticmethod(multiprocessing.dummy.Value) Array = staticmethod(multiprocessing.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass if type_ == 'manager': Temp = hashlib_helper.requires_hashdigest('md5')(Temp) Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = __module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocessing.set_forkserver_preload(PRELOAD) multiprocessing.process._cleanup() dangling[0] = multiprocessing.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " "see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates temp directory multiprocessing.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = False # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. 
test.support.gc_collect() multiprocessing.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocessing.process._dangling) - set(dangling[0]) if processes: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocessing.util._cleanup_tests() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule @unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available') @unittest.skipIf(sys.platform != "linux", "Linux only") @unittest.skipIf(sys.hexversion <= 0x30a05f0, "SemLock subclass") class SemLockTests(unittest.TestCase): def test_semlock_subclass(self): class SemLock(_multiprocessing.SemLock): pass name = f'test_semlock_subclass-{os.getpid()}' s = SemLock(1, 0, 10, name, False) _multiprocessing.sem_unlink(name) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/tests/__main__.py000066400000000000000000000016201455552142400262360ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') tests = glob.glob(suite + os.path.sep + '__init__.py') + \ [i for i in tests if 'main' not in i] if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/tests/mp_fork_bomb.py000066400000000000000000000007001455552142400271500ustar00rootroot00000000000000import multiprocessing, sys def foo(): print("123") # Because "if __name__ == '__main__'" is missing this will not work # correctly on Windows. However, we should get a RuntimeError rather # than the Windows equivalent of a fork bomb. 
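# For reference, a properly guarded script would wrap the code below as in
# this sketch (the guard is omitted on purpose so TestNoForkBomb can check
# the failure mode):
#
#     if __name__ == '__main__':
#         multiprocessing.set_start_method('spawn')
#         p = multiprocessing.Process(target=foo)
#         p.start()
#         p.join()
#         sys.exit(p.exitcode)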
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) else: multiprocessing.set_start_method('spawn') p = multiprocessing.Process(target=foo) p.start() p.join() sys.exit(p.exitcode) uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/tests/mp_preload.py000066400000000000000000000005551455552142400266460ustar00rootroot00000000000000import multiprocessing multiprocessing.Lock() def f(): print("ok") if __name__ == "__main__": ctx = multiprocessing.get_context("forkserver") modname = "multiprocess.tests.mp_preload" # Make sure it's importable __import__(modname) ctx.set_forkserver_preload([modname]) proc = ctx.Process(target=f) proc.start() proc.join() uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/tests/test_multiprocessing_fork.py000066400000000000000000000007341455552142400320320ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("fork is not available on Windows") if sys.platform == 'darwin': raise unittest.SkipTest("test may crash on macOS (bpo-33725)") install_tests_in_module_dict(globals(), 'fork') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/tests/test_multiprocessing_forkserver.py000066400000000000000000000006071455552142400332600ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("forkserver is not available on Windows") install_tests_in_module_dict(globals(), 'forkserver') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/tests/test_multiprocessing_main_handling.py000066400000000000000000000273621455552142400336670ustar00rootroot00000000000000# tests __main__ module handling in multiprocessing from test import support from test.support import import_helper # Skip tests if _multiprocessing wasn't built. import_helper.import_module('_multiprocessing') import importlib import importlib.machinery import unittest import sys import os import os.path import py_compile from test.support import os_helper from test.support.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, assert_python_ok) if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") # Look up which start methods are available to test import multiprocess as multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) # Issue #22332: Skip tests if sem_open implementation is broken. import_helper.import_module('multiprocess.synchronize') verbose = support.verbose test_source = """\ # multiprocessing includes all sorts of shenanigans to make __main__ # attributes accessible in the subprocess in a pickle compatible way. 
# We run the "doesn't work in the interactive interpreter" example from # the docs to make sure it *does* work from an executed __main__, # regardless of the invocation mechanism import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method # We use this __main__ defined function in the map call below in order to # check that multiprocessing in correctly running the unguarded # code in child processes and then making it available as __main__ def f(x): return x*x # Check explicit relative imports if "check_sibling" in __file__: # We're inside a package and not in a __main__.py file # so make sure explicit relative imports work correctly from . import sibling if __name__ == '__main__': start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(f, [1, 2, 3], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) test_source_main_skipped_in_children = """\ # __main__.py files have an implied "if __name__ == '__main__'" so # multiprocessing should always skip running them in child processes # This means we can't use __main__ defined functions in child processes, # so we just use "int" as a passthrough operation below if __name__ != "__main__": raise RuntimeError("Should only be called as __main__!") import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(int, [1, 4, 9], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) # These helpers were copied from test_cmd_line_script & tweaked a bit... def _make_test_script(script_dir, script_basename, source=test_source, omit_suffix=False): to_return = make_script(script_dir, script_basename, source, omit_suffix) # Hack to check explicit relative imports if script_basename == "check_sibling": make_script(script_dir, "sibling", "") importlib.invalidate_caches() return to_return def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source=test_source, depth=1): to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth) importlib.invalidate_caches() return to_return # There's no easy way to pass the script directory in to get # -m to work (avoiding that is the whole point of making # directories and zipfiles executable!) 
# So we fake it for testing purposes with a custom launch script launch_source = """\ import sys, os.path, runpy sys.path.insert(0, %s) runpy._run_module_as_main(%r) """ def _make_launch_script(script_dir, script_basename, module_name, path=None): if path is None: path = "os.path.dirname(__file__)" else: path = repr(path) source = launch_source % (path, module_name) to_return = make_script(script_dir, script_basename, source) importlib.invalidate_caches() return to_return class MultiProcessingCmdLineMixin(): maxDiff = None # Show full tracebacks on subprocess failure def setUp(self): if self.start_method not in AVAILABLE_START_METHODS: self.skipTest("%r start method not available" % self.start_method) def _check_output(self, script_name, exit_code, out, err): if verbose > 1: print("Output from test script %r:" % script_name) print(repr(out)) self.assertEqual(exit_code, 0) self.assertEqual(err.decode('utf-8'), '') expected_results = "%s -> [1, 4, 9]" % self.start_method self.assertEqual(out.decode('utf-8').strip(), expected_results) def _check_script(self, script_name, *cmd_line_switches): if not __debug__: cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) run_args = cmd_line_switches + (script_name, self.start_method) rc, out, err = assert_python_ok(*run_args, __isolated=False) self._check_output(script_name, rc, out, err) def test_basic_script(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') self._check_script(script_name) def test_basic_script_no_suffix(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script', omit_suffix=True) self._check_script(script_name) def test_ipython_workaround(self): # Some versions of the IPython launch script are missing the # __name__ = "__main__" guard, and multiprocessing has long had # a workaround for that case # See https://github.com/ipython/ipython/issues/4698 source = test_source_main_skipped_in_children with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'ipython', source=source) self._check_script(script_name) script_no_suffix = _make_test_script(script_dir, 'ipython', source=source, omit_suffix=True) self._check_script(script_no_suffix) def test_script_compiled(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(pyc_file) def test_directory(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) self._check_script(script_dir) def test_directory_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(script_dir) def test_zipfile(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) self._check_script(zip_name) def test_zipfile_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) compiled_name = 
py_compile.compile(script_name, doraise=True) zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) self._check_script(zip_name) def test_module_in_package(self): with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, 'check_sibling') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.check_sibling') self._check_script(launch_name) def test_module_in_package_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) self._check_script(launch_name) def test_module_in_subpackage_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) self._check_script(launch_name) def test_package(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) def test_package_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) compiled_name = py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) # Test all supported start methods (setupClass skips as appropriate) class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'spawn' main_in_children_source = test_source_main_skipped_in_children class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'fork' main_in_children_source = test_source class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'forkserver' main_in_children_source = test_source_main_skipped_in_children def tearDownModule(): support.reap_children() if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/tests/test_multiprocessing_spawn.py000066400000000000000000000004241455552142400322150ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") install_tests_in_module_dict(globals(), 'spawn') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.10/multiprocess/util.py000066400000000000000000000332741455552142400243430ustar00rootroot00000000000000# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import itertools import sys import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocess' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocess ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # Abstract socket support def _platform_supports_abstract_sockets(): if sys.platform == "linux": return True if hasattr(sys, 'getandroidapilevel'): return True return False def is_abstract_socket_namespace(address): if not address: return False if isinstance(address, bytes): return address[0] == 0 elif isinstance(address, str): return address[0] == "\0" raise TypeError(f'address type of {address!r} unrecognized') abstract_sockets_supported = _platform_supports_abstract_sockets() # # Function returning a temp directory which will be removed on exit # def _remove_temp_dir(rmtree, tempdir): rmtree(tempdir) current_process = process.current_process() # current_process() can be None if the finalizer is called # late during Python finalization if current_process is not None: current_process._config['tempdir'] = None def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up tempdir = process.current_process()._config.get('tempdir') if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) # keep a strong reference to shutil.rmtree(), since the finalizer # can be called late during Python shutdown Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), exitpriority=-100) process.current_process()._config['tempdir'] = tempdir return tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} 
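# _finalizer_registry maps (exitpriority, creation_counter) keys to Finalize
# objects. A typical registration mirrors the one in get_temp_dir() above;
# as a rough usage sketch (tempdir is just an illustrative variable here):
#
#     f = Finalize(None, shutil.rmtree, args=(tempdir,), exitpriority=-100)
#     f.still_active()   # True until the callback runs or f.cancel() is called
#
# At shutdown, _run_finalizers() invokes registered callbacks in order of
# decreasing exitpriority.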
_finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): if (exitpriority is not None) and not isinstance(exitpriority,int): raise TypeError( "Exitpriority ({0!r}) must be None or int, not {1!s}".format( exitpriority, type(exitpriority))) if obj is not None: self._weakref = weakref.ref(obj, self) elif exitpriority is None: raise ValueError("Without object, exitpriority cannot be None") self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<%s object, dead>' % self.__class__.__name__ x = '<%s object, callback=%s' % ( self.__class__.__name__, getattr(self._callback, '__name__', self._callback)) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitpriority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0] is not None else: f = lambda p : p[0] is not None and p[0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). # list(_finalizer_registry) should be atomic, while # list(_finalizer_registry.items()) is not. 
keys = [key for key in list(_finalizer_registry) if f(key)] keys.sort(reverse=True) for key in keys: finalizer = _finalizer_registry.get(key) # key may have been removed from the registry if finalizer is not None: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=process.active_children, current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) def _at_fork_reinit(self): self._lock._at_fork_reinit() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () # # Close fds except those specified # try: MAXFD = os.sysconf("SC_OPEN_MAX") except Exception: MAXFD = 256 def close_all_fds_except(fds): fds = list(fds) + [-1, MAXFD] fds.sort() assert fds[-1] == MAXFD, 'fd too large' for i in range(len(fds) - 1): os.closerange(fds[i]+1, fds[i+1]) # # Close sys.stdin and replace stdin with os.devnull # def _close_stdin(): if sys.stdin is None: return try: sys.stdin.close() except (OSError, ValueError): pass try: fd = os.open(os.devnull, os.O_RDONLY) try: sys.stdin = open(fd, encoding="utf-8", closefd=False) except: os.close(fd) raise except (OSError, ValueError): pass # # Flush standard streams, if any # def _flush_std_streams(): try: sys.stdout.flush() except (AttributeError, ValueError): pass try: sys.stderr.flush() except (AttributeError, ValueError): pass # # Start a program with only specified fds kept open # def spawnv_passfds(path, args, passfds): import _posixsubprocess passfds = tuple(sorted(map(int, passfds))) errpipe_read, errpipe_write = os.pipe() try: return 
_posixsubprocess.fork_exec( args, [os.fsencode(path)], True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, None, None, None, -1, None) finally: os.close(errpipe_read) os.close(errpipe_write) def close_fds(*fds): """Close each file descriptor given as an argument""" for fd in fds: os.close(fd) def _cleanup_tests(): """Cleanup multiprocessing resources when multiprocessing tests completed.""" from test import support # cleanup multiprocessing process._cleanup() # Stop the ForkServer process if it's running from multiprocess import forkserver forkserver._forkserver._stop() # Stop the ResourceTracker process if it's running from multiprocess import resource_tracker resource_tracker._resource_tracker._stop() # bpo-37421: Explicitly call _run_finalizers() to remove immediately # temporary directories created by multiprocessing.util.get_temp_dir(). _run_finalizers() support.gc_collect() support.reap_children() uqfoundation-multiprocess-b3457a5/py3.11/000077500000000000000000000000001455552142400202535ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/Modules/000077500000000000000000000000001455552142400216635ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/Modules/_multiprocess/000077500000000000000000000000001455552142400245535ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/Modules/_multiprocess/clinic/000077500000000000000000000000001455552142400260145ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/Modules/_multiprocess/clinic/multiprocessing.c.h000066400000000000000000000075701455552142400316460ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_closesocket__doc__, "closesocket($module, handle, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_CLOSESOCKET_METHODDEF \ {"closesocket", (PyCFunction)_multiprocessing_closesocket, METH_O, _multiprocessing_closesocket__doc__}, static PyObject * _multiprocessing_closesocket_impl(PyObject *module, HANDLE handle); static PyObject * _multiprocessing_closesocket(PyObject *module, PyObject *arg) { PyObject *return_value = NULL; HANDLE handle; if (!PyArg_Parse(arg, ""F_HANDLE":closesocket", &handle)) { goto exit; } return_value = _multiprocessing_closesocket_impl(module, handle); exit: return return_value; } #endif /* defined(MS_WINDOWS) */ #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_recv__doc__, "recv($module, handle, size, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_RECV_METHODDEF \ {"recv", _PyCFunction_CAST(_multiprocessing_recv), METH_FASTCALL, _multiprocessing_recv__doc__}, static PyObject * _multiprocessing_recv_impl(PyObject *module, HANDLE handle, int size); static PyObject * _multiprocessing_recv(PyObject *module, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; HANDLE handle; int size; if (!_PyArg_ParseStack(args, nargs, ""F_HANDLE"i:recv", &handle, &size)) { goto exit; } return_value = _multiprocessing_recv_impl(module, handle, size); exit: return return_value; } #endif /* defined(MS_WINDOWS) */ #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_send__doc__, "send($module, handle, buf, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_SEND_METHODDEF \ {"send", _PyCFunction_CAST(_multiprocessing_send), METH_FASTCALL, _multiprocessing_send__doc__}, static PyObject * _multiprocessing_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf); static PyObject * _multiprocessing_send(PyObject *module, PyObject 
*const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; HANDLE handle; Py_buffer buf = {NULL, NULL}; if (!_PyArg_ParseStack(args, nargs, ""F_HANDLE"y*:send", &handle, &buf)) { goto exit; } return_value = _multiprocessing_send_impl(module, handle, &buf); exit: /* Cleanup for buf */ if (buf.obj) { PyBuffer_Release(&buf); } return return_value; } #endif /* defined(MS_WINDOWS) */ PyDoc_STRVAR(_multiprocessing_sem_unlink__doc__, "sem_unlink($module, name, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_SEM_UNLINK_METHODDEF \ {"sem_unlink", (PyCFunction)_multiprocessing_sem_unlink, METH_O, _multiprocessing_sem_unlink__doc__}, static PyObject * _multiprocessing_sem_unlink_impl(PyObject *module, const char *name); static PyObject * _multiprocessing_sem_unlink(PyObject *module, PyObject *arg) { PyObject *return_value = NULL; const char *name; if (!PyUnicode_Check(arg)) { _PyArg_BadArgument("sem_unlink", "argument", "str", arg); goto exit; } Py_ssize_t name_length; name = PyUnicode_AsUTF8AndSize(arg, &name_length); if (name == NULL) { goto exit; } if (strlen(name) != (size_t)name_length) { PyErr_SetString(PyExc_ValueError, "embedded null character"); goto exit; } return_value = _multiprocessing_sem_unlink_impl(module, name); exit: return return_value; } #ifndef _MULTIPROCESSING_CLOSESOCKET_METHODDEF #define _MULTIPROCESSING_CLOSESOCKET_METHODDEF #endif /* !defined(_MULTIPROCESSING_CLOSESOCKET_METHODDEF) */ #ifndef _MULTIPROCESSING_RECV_METHODDEF #define _MULTIPROCESSING_RECV_METHODDEF #endif /* !defined(_MULTIPROCESSING_RECV_METHODDEF) */ #ifndef _MULTIPROCESSING_SEND_METHODDEF #define _MULTIPROCESSING_SEND_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEND_METHODDEF) */ /*[clinic end generated code: output=d3bbf69de578db7b input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.11/Modules/_multiprocess/clinic/posixshmem.c.h000066400000000000000000000072371455552142400306130ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(HAVE_SHM_OPEN) PyDoc_STRVAR(_posixshmem_shm_open__doc__, "shm_open($module, /, path, flags, mode=511)\n" "--\n" "\n" "Open a shared memory object. Returns a file descriptor (integer)."); #define _POSIXSHMEM_SHM_OPEN_METHODDEF \ {"shm_open", _PyCFunction_CAST(_posixshmem_shm_open), METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_open__doc__}, static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode); static PyObject * _posixshmem_shm_open(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; static const char * const _keywords[] = {"path", "flags", "mode", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "shm_open", 0}; PyObject *argsbuf[3]; Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 2; PyObject *path; int flags; int mode = 511; int _return_value; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 2, 3, 0, argsbuf); if (!args) { goto exit; } if (!PyUnicode_Check(args[0])) { _PyArg_BadArgument("shm_open", "argument 'path'", "str", args[0]); goto exit; } if (PyUnicode_READY(args[0]) == -1) { goto exit; } path = args[0]; flags = _PyLong_AsInt(args[1]); if (flags == -1 && PyErr_Occurred()) { goto exit; } if (!noptargs) { goto skip_optional_pos; } mode = _PyLong_AsInt(args[2]); if (mode == -1 && PyErr_Occurred()) { goto exit; } skip_optional_pos: _return_value = _posixshmem_shm_open_impl(module, path, flags, mode); if ((_return_value == -1) && PyErr_Occurred()) { goto exit; } return_value = PyLong_FromLong((long)_return_value); exit: return return_value; } #endif /* defined(HAVE_SHM_OPEN) */ #if defined(HAVE_SHM_UNLINK) PyDoc_STRVAR(_posixshmem_shm_unlink__doc__, "shm_unlink($module, /, path)\n" "--\n" "\n" "Remove a shared memory object (similar to unlink()).\n" "\n" "Remove a shared memory object name, and, once all processes have unmapped\n" "the object, de-allocates and destroys the contents of the associated memory\n" "region."); #define _POSIXSHMEM_SHM_UNLINK_METHODDEF \ {"shm_unlink", _PyCFunction_CAST(_posixshmem_shm_unlink), METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_unlink__doc__}, static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path); static PyObject * _posixshmem_shm_unlink(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; static const char * const _keywords[] = {"path", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "shm_unlink", 0}; PyObject *argsbuf[1]; PyObject *path; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf); if (!args) { goto exit; } if (!PyUnicode_Check(args[0])) { _PyArg_BadArgument("shm_unlink", "argument 'path'", "str", args[0]); goto exit; } if (PyUnicode_READY(args[0]) == -1) { goto exit; } path = args[0]; return_value = _posixshmem_shm_unlink_impl(module, path); exit: return return_value; } #endif /* defined(HAVE_SHM_UNLINK) */ #ifndef _POSIXSHMEM_SHM_OPEN_METHODDEF #define _POSIXSHMEM_SHM_OPEN_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_OPEN_METHODDEF) */ #ifndef _POSIXSHMEM_SHM_UNLINK_METHODDEF #define _POSIXSHMEM_SHM_UNLINK_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_UNLINK_METHODDEF) */ /*[clinic end generated code: output=a6db931a47d36e1b input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.11/Modules/_multiprocess/clinic/semaphore.c.h000066400000000000000000000346601455552142400304020ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_acquire__doc__, "acquire($self, /, block=True, timeout=None)\n" "--\n" "\n" "Acquire the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF \ {"acquire", _PyCFunction_CAST(_multiprocessing_SemLock_acquire), METH_FASTCALL|METH_KEYWORDS, _multiprocessing_SemLock_acquire__doc__}, static PyObject * _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj); static PyObject * _multiprocessing_SemLock_acquire(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; static const char * const _keywords[] = {"block", "timeout", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, 
"acquire", 0}; PyObject *argsbuf[2]; Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 0; int blocking = 1; PyObject *timeout_obj = Py_None; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 2, 0, argsbuf); if (!args) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (args[0]) { blocking = _PyLong_AsInt(args[0]); if (blocking == -1 && PyErr_Occurred()) { goto exit; } if (!--noptargs) { goto skip_optional_pos; } } timeout_obj = args[1]; skip_optional_pos: return_value = _multiprocessing_SemLock_acquire_impl(self, blocking, timeout_obj); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_release__doc__, "release($self, /)\n" "--\n" "\n" "Release the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF \ {"release", (PyCFunction)_multiprocessing_SemLock_release, METH_NOARGS, _multiprocessing_SemLock_release__doc__}, static PyObject * _multiprocessing_SemLock_release_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock_release(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock_release_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_acquire__doc__, "acquire($self, /, block=True, timeout=None)\n" "--\n" "\n" "Acquire the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF \ {"acquire", _PyCFunction_CAST(_multiprocessing_SemLock_acquire), METH_FASTCALL|METH_KEYWORDS, _multiprocessing_SemLock_acquire__doc__}, static PyObject * _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj); static PyObject * _multiprocessing_SemLock_acquire(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; static const char * const _keywords[] = {"block", "timeout", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "acquire", 0}; PyObject *argsbuf[2]; Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 0; int blocking = 1; PyObject *timeout_obj = Py_None; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 2, 0, argsbuf); if (!args) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (args[0]) { blocking = _PyLong_AsInt(args[0]); if (blocking == -1 && PyErr_Occurred()) { goto exit; } if (!--noptargs) { goto skip_optional_pos; } } timeout_obj = args[1]; skip_optional_pos: return_value = _multiprocessing_SemLock_acquire_impl(self, blocking, timeout_obj); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_release__doc__, "release($self, /)\n" "--\n" "\n" "Release the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF \ {"release", (PyCFunction)_multiprocessing_SemLock_release, METH_NOARGS, _multiprocessing_SemLock_release__doc__}, static PyObject * _multiprocessing_SemLock_release_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock_release(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock_release_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) static PyObject * _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, int maxvalue, const char *name, int unlink); static PyObject * _multiprocessing_SemLock(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; static const char * const _keywords[] = {"kind", "value", "maxvalue", "name", "unlink", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "SemLock", 0}; PyObject *argsbuf[5]; PyObject * const *fastargs; Py_ssize_t nargs = PyTuple_GET_SIZE(args); int kind; int value; int maxvalue; const char *name; int unlink; fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 5, 5, 0, argsbuf); if (!fastargs) { goto exit; } kind = _PyLong_AsInt(fastargs[0]); if (kind == -1 && PyErr_Occurred()) { goto exit; } value = _PyLong_AsInt(fastargs[1]); if (value == -1 && PyErr_Occurred()) { goto exit; } maxvalue = _PyLong_AsInt(fastargs[2]); if (maxvalue == -1 && PyErr_Occurred()) { goto exit; } if (!PyUnicode_Check(fastargs[3])) { _PyArg_BadArgument("SemLock", "argument 'name'", "str", fastargs[3]); goto exit; } Py_ssize_t name_length; name = PyUnicode_AsUTF8AndSize(fastargs[3], &name_length); if (name == NULL) { goto exit; } if (strlen(name) != (size_t)name_length) { PyErr_SetString(PyExc_ValueError, "embedded null character"); goto exit; } unlink = _PyLong_AsInt(fastargs[4]); if (unlink == -1 && PyErr_Occurred()) { goto exit; } return_value = _multiprocessing_SemLock_impl(type, kind, value, maxvalue, name, unlink); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__rebuild__doc__, "_rebuild($type, handle, kind, maxvalue, name, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF \ {"_rebuild", _PyCFunction_CAST(_multiprocessing_SemLock__rebuild), METH_FASTCALL|METH_CLASS, _multiprocessing_SemLock__rebuild__doc__}, static PyObject * _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, const char *name); static PyObject * _multiprocessing_SemLock__rebuild(PyTypeObject *type, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; SEM_HANDLE handle; int kind; int maxvalue; 
const char *name; if (!_PyArg_ParseStack(args, nargs, ""F_SEM_HANDLE"iiz:_rebuild", &handle, &kind, &maxvalue, &name)) { goto exit; } return_value = _multiprocessing_SemLock__rebuild_impl(type, handle, kind, maxvalue, name); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__count__doc__, "_count($self, /)\n" "--\n" "\n" "Num of `acquire()`s minus num of `release()`s for this process."); #define _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF \ {"_count", (PyCFunction)_multiprocessing_SemLock__count, METH_NOARGS, _multiprocessing_SemLock__count__doc__}, static PyObject * _multiprocessing_SemLock__count_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__count(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__count_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__is_mine__doc__, "_is_mine($self, /)\n" "--\n" "\n" "Whether the lock is owned by this thread."); #define _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF \ {"_is_mine", (PyCFunction)_multiprocessing_SemLock__is_mine, METH_NOARGS, _multiprocessing_SemLock__is_mine__doc__}, static PyObject * _multiprocessing_SemLock__is_mine_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__is_mine(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__is_mine_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__get_value__doc__, "_get_value($self, /)\n" "--\n" "\n" "Get the value of the semaphore."); #define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF \ {"_get_value", (PyCFunction)_multiprocessing_SemLock__get_value, METH_NOARGS, _multiprocessing_SemLock__get_value__doc__}, static PyObject * _multiprocessing_SemLock__get_value_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__get_value(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__get_value_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__is_zero__doc__, "_is_zero($self, /)\n" "--\n" "\n" "Return whether semaphore has value zero."); #define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF \ {"_is_zero", (PyCFunction)_multiprocessing_SemLock__is_zero, METH_NOARGS, _multiprocessing_SemLock__is_zero__doc__}, static PyObject * _multiprocessing_SemLock__is_zero_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__is_zero(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__is_zero_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__after_fork__doc__, "_after_fork($self, /)\n" "--\n" "\n" "Rezero the net acquisition count after fork()."); #define _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF \ {"_after_fork", (PyCFunction)_multiprocessing_SemLock__after_fork, METH_NOARGS, _multiprocessing_SemLock__after_fork__doc__}, static PyObject * _multiprocessing_SemLock__after_fork_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__after_fork(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__after_fork_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock___enter____doc__, "__enter__($self, /)\n" "--\n" "\n" "Enter 
the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF \ {"__enter__", (PyCFunction)_multiprocessing_SemLock___enter__, METH_NOARGS, _multiprocessing_SemLock___enter____doc__}, static PyObject * _multiprocessing_SemLock___enter___impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock___enter__(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock___enter___impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock___exit____doc__, "__exit__($self, exc_type=None, exc_value=None, exc_tb=None, /)\n" "--\n" "\n" "Exit the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF \ {"__exit__", _PyCFunction_CAST(_multiprocessing_SemLock___exit__), METH_FASTCALL, _multiprocessing_SemLock___exit____doc__}, static PyObject * _multiprocessing_SemLock___exit___impl(SemLockObject *self, PyObject *exc_type, PyObject *exc_value, PyObject *exc_tb); static PyObject * _multiprocessing_SemLock___exit__(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; PyObject *exc_type = Py_None; PyObject *exc_value = Py_None; PyObject *exc_tb = Py_None; if (!_PyArg_CheckPositional("__exit__", nargs, 0, 3)) { goto exit; } if (nargs < 1) { goto skip_optional; } exc_type = args[0]; if (nargs < 2) { goto skip_optional; } exc_value = args[1]; if (nargs < 3) { goto skip_optional; } exc_tb = args[2]; skip_optional: return_value = _multiprocessing_SemLock___exit___impl(self, exc_type, exc_value, exc_tb); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #ifndef _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF #define _MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF #define _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF #define _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF #define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF #define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF #define _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF #define _MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF #define _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF) */ /*[clinic end generated code: output=64ba32544811c9e6 input=a9049054013a1b77]*/ 
uqfoundation-multiprocess-b3457a5/py3.11/Modules/_multiprocess/multiprocess.c000066400000000000000000000153031455552142400274520ustar00rootroot00000000000000/* * Extension module used by multiprocess package * * multiprocess.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocess.h" /*[python input] class HANDLE_converter(CConverter): type = "HANDLE" format_unit = '"F_HANDLE"' [python start generated code]*/ /*[python end generated code: output=da39a3ee5e6b4b0d input=9fad6080b79ace91]*/ /*[clinic input] module _multiprocess [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=01e0745f380ac6e3]*/ #include "clinic/multiprocessing.c.h" /* * Function which raises exceptions based on error codes */ PyObject * _PyMp_SetError(PyObject *Type, int num) { switch (num) { #ifdef MS_WINDOWS case MP_STANDARD_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, 0); break; case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, WSAGetLastError()); break; #else /* !MS_WINDOWS */ case MP_STANDARD_ERROR: case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetFromErrno(Type); break; #endif /* !MS_WINDOWS */ case MP_MEMORY_ERROR: PyErr_NoMemory(); break; case MP_EXCEPTION_HAS_BEEN_SET: break; default: PyErr_Format(PyExc_RuntimeError, "unknown error number %d", num); } return NULL; } #ifdef MS_WINDOWS /*[clinic input] _multiprocess.closesocket handle: HANDLE / [clinic start generated code]*/ static PyObject * _multiprocess_closesocket_impl(PyObject *module, HANDLE handle) /*[clinic end generated code: output=214f359f900966f4 input=8a20706dd386c6cc]*/ { int ret; Py_BEGIN_ALLOW_THREADS ret = closesocket((SOCKET) handle); Py_END_ALLOW_THREADS if (ret) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); Py_RETURN_NONE; } /*[clinic input] _multiprocess.recv handle: HANDLE size: int / [clinic start generated code]*/ static PyObject * _multiprocess_recv_impl(PyObject *module, HANDLE handle, int size) /*[clinic end generated code: output=92322781ba9ff598 input=6a5b0834372cee5b]*/ { int nread; PyObject *buf; buf = PyBytes_FromStringAndSize(NULL, size); if (!buf) return NULL; Py_BEGIN_ALLOW_THREADS nread = recv((SOCKET) handle, PyBytes_AS_STRING(buf), size, 0); Py_END_ALLOW_THREADS if (nread < 0) { Py_DECREF(buf); return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); } _PyBytes_Resize(&buf, nread); return buf; } /*[clinic input] _multiprocess.send handle: HANDLE buf: Py_buffer / [clinic start generated code]*/ static PyObject * _multiprocess_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf) /*[clinic end generated code: output=52d7df0519c596cb input=41dce742f98d2210]*/ { int ret, length; length = (int)Py_MIN(buf->len, INT_MAX); Py_BEGIN_ALLOW_THREADS ret = send((SOCKET) handle, buf->buf, length, 0); Py_END_ALLOW_THREADS if (ret < 0) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); return PyLong_FromLong(ret); } #endif /*[clinic input] _multiprocess.sem_unlink name: str / [clinic start generated code]*/ static PyObject * _multiprocess_sem_unlink_impl(PyObject *module, const char *name) /*[clinic end generated code: output=fcbfeb1ed255e647 input=bf939aff9564f1d5]*/ { return _PyMp_sem_unlink(name); } /* * Function table */ static PyMethodDef module_methods[] = { #ifdef MS_WINDOWS _MULTIPROCESSING_CLOSESOCKET_METHODDEF _MULTIPROCESSING_RECV_METHODDEF _MULTIPROCESSING_SEND_METHODDEF 
#endif #if !defined(POSIX_SEMAPHORES_NOT_ENABLED) && !defined(__ANDROID__) _MULTIPROCESSING_SEM_UNLINK_METHODDEF #endif {NULL} }; /* * Initialize */ static int multiprocess_exec(PyObject *module) { #ifdef HAVE_MP_SEMAPHORE /* Add _PyMp_SemLock type to module */ if (PyModule_AddType(module, &_PyMp_SemLockType) < 0) { return -1; } { PyObject *py_sem_value_max; /* Some systems define SEM_VALUE_MAX as an unsigned value that * causes it to be negative when used as an int (NetBSD). * * Issue #28152: Use (0) instead of 0 to fix a warning on dead code * when using clang -Wunreachable-code. */ if ((int)(SEM_VALUE_MAX) < (0)) py_sem_value_max = PyLong_FromLong(INT_MAX); else py_sem_value_max = PyLong_FromLong(SEM_VALUE_MAX); if (py_sem_value_max == NULL) { return -1; } if (PyDict_SetItemString(_PyMp_SemLockType.tp_dict, "SEM_VALUE_MAX", py_sem_value_max) < 0) { Py_DECREF(py_sem_value_max); return -1; } Py_DECREF(py_sem_value_max); } #endif /* Add configuration macros */ PyObject *flags = PyDict_New(); if (!flags) { return -1; } #define ADD_FLAG(name) \ do { \ PyObject *value = PyLong_FromLong(name); \ if (value == NULL) { \ Py_DECREF(flags); \ return -1; \ } \ if (PyDict_SetItemString(flags, #name, value) < 0) { \ Py_DECREF(flags); \ Py_DECREF(value); \ return -1; \ } \ Py_DECREF(value); \ } while (0) #if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) ADD_FLAG(HAVE_SEM_OPEN); #endif #ifdef HAVE_SEM_TIMEDWAIT ADD_FLAG(HAVE_SEM_TIMEDWAIT); #endif #ifdef HAVE_BROKEN_SEM_GETVALUE ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE); #endif #ifdef HAVE_BROKEN_SEM_UNLINK ADD_FLAG(HAVE_BROKEN_SEM_UNLINK); #endif if (PyModule_AddObject(module, "flags", flags) < 0) { Py_DECREF(flags); return -1; } return 0; } static PyModuleDef_Slot multiprocess_slots[] = { {Py_mod_exec, multiprocess_exec}, {0, NULL} }; static struct PyModuleDef multiprocess_module = { PyModuleDef_HEAD_INIT, .m_name = "_multiprocess", .m_methods = module_methods, .m_slots = multiprocess_slots, }; PyMODINIT_FUNC PyInit__multiprocess(void) { return PyModuleDef_Init(&multiprocess_module); } uqfoundation-multiprocess-b3457a5/py3.11/Modules/_multiprocess/multiprocess.h000066400000000000000000000042301455552142400274540ustar00rootroot00000000000000#ifndef MULTIPROCESS_H #define MULTIPROCESS_H #define PY_SSIZE_T_CLEAN #include "Python.h" #include "structmember.h" #include "pythread.h" /* * Platform includes and definitions */ #ifdef MS_WINDOWS # define WIN32_LEAN_AND_MEAN # include # include # include /* getpid() */ # ifdef Py_DEBUG # include # endif # define SEM_HANDLE HANDLE # define SEM_VALUE_MAX LONG_MAX # define HAVE_MP_SEMAPHORE #else # include /* O_CREAT and O_EXCL */ # if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) # define HAVE_MP_SEMAPHORE # include typedef sem_t *SEM_HANDLE; # endif #endif /* * Issue 3110 - Solaris does not define SEM_VALUE_MAX */ #ifndef SEM_VALUE_MAX #if defined(HAVE_SYSCONF) && defined(_SC_SEM_VALUE_MAX) # define SEM_VALUE_MAX sysconf(_SC_SEM_VALUE_MAX) #elif defined(_SEM_VALUE_MAX) # define SEM_VALUE_MAX _SEM_VALUE_MAX #elif defined(_POSIX_SEM_VALUE_MAX) # define SEM_VALUE_MAX _POSIX_SEM_VALUE_MAX #else # define SEM_VALUE_MAX INT_MAX #endif #endif /* * Format codes */ #if SIZEOF_VOID_P == SIZEOF_LONG # define F_POINTER "k" # define T_POINTER T_ULONG #elif SIZEOF_VOID_P == SIZEOF_LONG_LONG # define F_POINTER "K" # define T_POINTER T_ULONGLONG #else # error "can't find format code for unsigned integer of same size as void*" #endif #ifdef MS_WINDOWS # define F_HANDLE F_POINTER # define T_HANDLE 
T_POINTER # define F_SEM_HANDLE F_HANDLE # define T_SEM_HANDLE T_HANDLE #else # define F_HANDLE "i" # define T_HANDLE T_INT # define F_SEM_HANDLE F_POINTER # define T_SEM_HANDLE T_POINTER #endif /* * Error codes which can be returned by functions called without GIL */ #define MP_SUCCESS (0) #define MP_STANDARD_ERROR (-1) #define MP_MEMORY_ERROR (-1001) #define MP_SOCKET_ERROR (-1002) #define MP_EXCEPTION_HAS_BEEN_SET (-1003) PyObject *_PyMp_SetError(PyObject *Type, int num); /* * Externs - not all will really exist on all platforms */ extern PyTypeObject _PyMp_SemLockType; extern PyObject *_PyMp_sem_unlink(const char *name); #endif /* MULTIPROCESS_H */ uqfoundation-multiprocess-b3457a5/py3.11/Modules/_multiprocess/posixshmem.c000066400000000000000000000055071455552142400271220ustar00rootroot00000000000000/* posixshmem - A Python extension that provides shm_open() and shm_unlink() */ #define PY_SSIZE_T_CLEAN #include // for shm_open() and shm_unlink() #ifdef HAVE_SYS_MMAN_H #include #endif /*[clinic input] module _posixshmem [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=a416734e49164bf8]*/ /* * * Module-level functions & meta stuff * */ #ifdef HAVE_SHM_OPEN /*[clinic input] _posixshmem.shm_open -> int path: unicode flags: int mode: int = 0o777 # "shm_open(path, flags, mode=0o777)\n\n\ Open a shared memory object. Returns a file descriptor (integer). [clinic start generated code]*/ static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode) /*[clinic end generated code: output=8d110171a4fa20df input=e83b58fa802fac25]*/ { int fd; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return -1; } do { Py_BEGIN_ALLOW_THREADS fd = shm_open(name, flags, mode); Py_END_ALLOW_THREADS } while (fd < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (fd < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return -1; } return fd; } #endif /* HAVE_SHM_OPEN */ #ifdef HAVE_SHM_UNLINK /*[clinic input] _posixshmem.shm_unlink path: unicode Remove a shared memory object (similar to unlink()). Remove a shared memory object name, and, once all processes have unmapped the object, de-allocates and destroys the contents of the associated memory region. 
[clinic start generated code]*/ static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path) /*[clinic end generated code: output=42f8b23d134b9ff5 input=8dc0f87143e3b300]*/ { int rv; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return NULL; } do { Py_BEGIN_ALLOW_THREADS rv = shm_unlink(name); Py_END_ALLOW_THREADS } while (rv < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (rv < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return NULL; } Py_RETURN_NONE; } #endif /* HAVE_SHM_UNLINK */ #include "clinic/posixshmem.c.h" static PyMethodDef module_methods[ ] = { _POSIXSHMEM_SHM_OPEN_METHODDEF _POSIXSHMEM_SHM_UNLINK_METHODDEF {NULL} /* Sentinel */ }; static struct PyModuleDef _posixshmemmodule = { PyModuleDef_HEAD_INIT, .m_name = "_posixshmem", .m_doc = "POSIX shared memory module", .m_size = 0, .m_methods = module_methods, }; /* Module init function */ PyMODINIT_FUNC PyInit__posixshmem(void) { return PyModuleDef_Init(&_posixshmemmodule); } uqfoundation-multiprocess-b3457a5/py3.11/Modules/_multiprocess/semaphore.c000066400000000000000000000522341455552142400267100ustar00rootroot00000000000000/* * A type which wraps a semaphore * * semaphore.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocess.h" #ifdef HAVE_MP_SEMAPHORE enum { RECURSIVE_MUTEX, SEMAPHORE }; typedef struct { PyObject_HEAD SEM_HANDLE handle; unsigned long last_tid; int count; int maxvalue; int kind; char *name; } SemLockObject; /*[python input] class SEM_HANDLE_converter(CConverter): type = "SEM_HANDLE" format_unit = '"F_SEM_HANDLE"' [python start generated code]*/ /*[python end generated code: output=da39a3ee5e6b4b0d input=3e0ad43e482d8716]*/ /*[clinic input] module _multiprocess class _multiprocess.SemLock "SemLockObject *" "&_PyMp_SemLockType" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=935fb41b7d032599]*/ #include "clinic/semaphore.c.h" #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) #ifdef MS_WINDOWS /* * Windows definitions */ #define SEM_FAILED NULL #define SEM_CLEAR_ERROR() SetLastError(0) #define SEM_GET_LAST_ERROR() GetLastError() #define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL) #define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1) #define SEM_GETVALUE(sem, pval) _GetSemaphoreValue(sem, pval) #define SEM_UNLINK(name) 0 static int _GetSemaphoreValue(HANDLE handle, long *value) { long previous; switch (WaitForSingleObjectEx(handle, 0, FALSE)) { case WAIT_OBJECT_0: if (!ReleaseSemaphore(handle, 1, &previous)) return MP_STANDARD_ERROR; *value = previous + 1; return 0; case WAIT_TIMEOUT: *value = 0; return 0; default: return MP_STANDARD_ERROR; } } /*[clinic input] _multiprocess.SemLock.acquire block as blocking: bool(accept={int}) = True timeout as timeout_obj: object = None Acquire the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj) /*[clinic end generated code: output=f9998f0b6b0b0872 input=86f05662cf753eb4]*/ { double timeout; DWORD res, full_msecs, nhandles; HANDLE handles[2], sigint_event; /* calculate timeout */ if (!blocking) { full_msecs = 0; } else if (timeout_obj == Py_None) { full_msecs = INFINITE; } else { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; timeout *= 1000.0; /* convert to millisecs */ if (timeout < 0.0) { timeout = 0.0; } else if (timeout >= 0.5 * INFINITE) { /* 25 days */ PyErr_SetString(PyExc_OverflowError, "timeout is too large"); return NULL; } full_msecs = (DWORD)(timeout + 0.5); } /* check whether we already own the lock */ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } /* check whether we can acquire without releasing the GIL and blocking */ if (WaitForSingleObjectEx(self->handle, 0, FALSE) == WAIT_OBJECT_0) { self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; } /* prepare list of handles */ nhandles = 0; handles[nhandles++] = self->handle; if (_PyOS_IsMainThread()) { sigint_event = _PyOS_SigintEvent(); assert(sigint_event != NULL); handles[nhandles++] = sigint_event; } else { sigint_event = NULL; } /* do the wait */ Py_BEGIN_ALLOW_THREADS if (sigint_event != NULL) ResetEvent(sigint_event); res = WaitForMultipleObjectsEx(nhandles, handles, FALSE, full_msecs, FALSE); Py_END_ALLOW_THREADS /* handle result */ switch (res) { case WAIT_TIMEOUT: Py_RETURN_FALSE; case WAIT_OBJECT_0 + 0: self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; case WAIT_OBJECT_0 + 1: errno = EINTR; return PyErr_SetFromErrno(PyExc_OSError); case WAIT_FAILED: return PyErr_SetFromWindowsErr(0); default: PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or " "WaitForMultipleObjects() gave unrecognized " "value %u", res); return NULL; } } /*[clinic input] _multiprocess.SemLock.release Release the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_release_impl(SemLockObject *self) /*[clinic end generated code: output=b22f53ba96b0d1db input=ba7e63a961885d3d]*/ { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } if (!ReleaseSemaphore(self->handle, 1, NULL)) { if (GetLastError() == ERROR_TOO_MANY_POSTS) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } else { return PyErr_SetFromWindowsErr(0); } } --self->count; Py_RETURN_NONE; } #else /* !MS_WINDOWS */ /* * Unix definitions */ #define SEM_CLEAR_ERROR() #define SEM_GET_LAST_ERROR() 0 #define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val) #define SEM_CLOSE(sem) sem_close(sem) #define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval) #define SEM_UNLINK(name) sem_unlink(name) /* OS X 10.4 defines SEM_FAILED as -1 instead of (sem_t *)-1; this gives compiler warnings, and (potentially) undefined behaviour. 
*/ #ifdef __APPLE__ # undef SEM_FAILED # define SEM_FAILED ((sem_t *)-1) #endif #ifndef HAVE_SEM_UNLINK # define sem_unlink(name) 0 #endif // ifndef HAVE_SEM_TIMEDWAIT # define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save) static int sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) { int res; unsigned long delay, difference; struct timeval now, tvdeadline, tvdelay; errno = 0; tvdeadline.tv_sec = deadline->tv_sec; tvdeadline.tv_usec = deadline->tv_nsec / 1000; for (delay = 0 ; ; delay += 1000) { /* poll */ if (sem_trywait(sem) == 0) return 0; else if (errno != EAGAIN) return MP_STANDARD_ERROR; /* get current time */ if (gettimeofday(&now, NULL) < 0) return MP_STANDARD_ERROR; /* check for timeout */ if (tvdeadline.tv_sec < now.tv_sec || (tvdeadline.tv_sec == now.tv_sec && tvdeadline.tv_usec <= now.tv_usec)) { errno = ETIMEDOUT; return MP_STANDARD_ERROR; } /* calculate how much time is left */ difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 + (tvdeadline.tv_usec - now.tv_usec); /* check delay not too long -- maximum is 20 msecs */ if (delay > 20000) delay = 20000; if (delay > difference) delay = difference; /* sleep */ tvdelay.tv_sec = delay / 1000000; tvdelay.tv_usec = delay % 1000000; if (select(0, NULL, NULL, NULL, &tvdelay) < 0) return MP_STANDARD_ERROR; /* check for signals */ Py_BLOCK_THREADS res = PyErr_CheckSignals(); Py_UNBLOCK_THREADS if (res) { errno = EINTR; return MP_EXCEPTION_HAS_BEEN_SET; } } } // #endif /* !HAVE_SEM_TIMEDWAIT */ /*[clinic input] _multiprocess.SemLock.acquire block as blocking: bool(accept={int}) = True timeout as timeout_obj: object = None Acquire the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj) /*[clinic end generated code: output=f9998f0b6b0b0872 input=86f05662cf753eb4]*/ { int res, err = 0; struct timespec deadline = {0}; if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } int use_deadline = (timeout_obj != Py_None); if (use_deadline) { double timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) { return NULL; } if (timeout < 0.0) { timeout = 0.0; } struct timeval now; if (gettimeofday(&now, NULL) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } long sec = (long) timeout; long nsec = (long) (1e9 * (timeout - sec) + 0.5); deadline.tv_sec = now.tv_sec + sec; deadline.tv_nsec = now.tv_usec * 1000 + nsec; deadline.tv_sec += (deadline.tv_nsec / 1000000000); deadline.tv_nsec %= 1000000000; } /* Check whether we can acquire without releasing the GIL and blocking */ do { res = sem_trywait(self->handle); err = errno; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); errno = err; if (res < 0 && errno == EAGAIN && blocking) { /* Couldn't acquire immediately, need to block */ do { Py_BEGIN_ALLOW_THREADS if (!use_deadline) { res = sem_wait(self->handle); } else { res = sem_timedwait(self->handle, &deadline); } Py_END_ALLOW_THREADS err = errno; if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); } if (res < 0) { errno = err; if (errno == EAGAIN || errno == ETIMEDOUT) Py_RETURN_FALSE; else if (errno == EINTR) return NULL; else return PyErr_SetFromErrno(PyExc_OSError); } ++self->count; self->last_tid = PyThread_get_thread_ident(); Py_RETURN_TRUE; } /*[clinic input] _multiprocess.SemLock.release Release the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock_release_impl(SemLockObject *self) /*[clinic end generated code: output=b22f53ba96b0d1db input=ba7e63a961885d3d]*/ { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } else { #ifdef HAVE_BROKEN_SEM_GETVALUE /* We will only check properly the maxvalue == 1 case */ if (self->maxvalue == 1) { /* make sure that already locked */ if (sem_trywait(self->handle) < 0) { if (errno != EAGAIN) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } /* it is already locked as expected */ } else { /* it was not locked so undo wait and raise */ if (sem_post(self->handle) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } PyErr_SetString(PyExc_ValueError, "semaphore " "or lock released too many " "times"); return NULL; } } #else int sval; /* This check is not an absolute guarantee that the semaphore does not rise above maxvalue. */ if (sem_getvalue(self->handle, &sval) < 0) { return PyErr_SetFromErrno(PyExc_OSError); } else if (sval >= self->maxvalue) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } #endif } if (sem_post(self->handle) < 0) return PyErr_SetFromErrno(PyExc_OSError); --self->count; Py_RETURN_NONE; } #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject * newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, char *name) { SemLockObject *self = (SemLockObject *)type->tp_alloc(type, 0); if (!self) return NULL; self->handle = handle; self->kind = kind; self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; return (PyObject*)self; } /*[clinic input] @classmethod _multiprocess.SemLock.__new__ kind: int value: int maxvalue: int name: str unlink: bool(accept={int}) [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_impl(PyTypeObject *type, int kind, int value, int maxvalue, const char *name, int unlink) /*[clinic end generated code: output=30727e38f5f7577a input=b378c3ee27d3a0fa]*/ { SEM_HANDLE handle = SEM_FAILED; PyObject *result; char *name_copy = NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { PyErr_SetString(PyExc_ValueError, "unrecognized kind"); return NULL; } if (!unlink) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) { return PyErr_NoMemory(); } strcpy(name_copy, name); } SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; if (unlink && SEM_UNLINK(name) < 0) goto failure; result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; return result; failure: if (!PyErr_Occurred()) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); } if (handle != SEM_FAILED) SEM_CLOSE(handle); PyMem_Free(name_copy); return NULL; } /*[clinic input] @classmethod _multiprocess.SemLock._rebuild handle: SEM_HANDLE kind: int maxvalue: int name: str(accept={str, NoneType}) / [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, const char *name) /*[clinic end generated code: output=2aaee14f063f3bd9 input=f7040492ac6d9962]*/ { char *name_copy = NULL; if (name != NULL) { name_copy = PyMem_Malloc(strlen(name) + 
1); if (name_copy == NULL) return PyErr_NoMemory(); strcpy(name_copy, name); } #ifndef MS_WINDOWS if (name != NULL) { handle = sem_open(name, 0); if (handle == SEM_FAILED) { PyErr_SetFromErrno(PyExc_OSError); PyMem_Free(name_copy); return NULL; } } #endif return newsemlockobject(type, handle, kind, maxvalue, name_copy); } static void semlock_dealloc(SemLockObject* self) { if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); PyMem_Free(self->name); Py_TYPE(self)->tp_free((PyObject*)self); } /*[clinic input] _multiprocess.SemLock._count Num of `acquire()`s minus num of `release()`s for this process. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__count_impl(SemLockObject *self) /*[clinic end generated code: output=5ba8213900e517bb input=36fc59b1cd1025ab]*/ { return PyLong_FromLong((long)self->count); } /*[clinic input] _multiprocess.SemLock._is_mine Whether the lock is owned by this thread. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__is_mine_impl(SemLockObject *self) /*[clinic end generated code: output=92dc98863f4303be input=a96664cb2f0093ba]*/ { /* only makes sense for a lock */ return PyBool_FromLong(ISMINE(self)); } /*[clinic input] _multiprocess.SemLock._get_value Get the value of the semaphore. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__get_value_impl(SemLockObject *self) /*[clinic end generated code: output=64bc1b89bda05e36 input=cb10f9a769836203]*/ { #ifdef HAVE_BROKEN_SEM_GETVALUE PyErr_SetNone(PyExc_NotImplementedError); return NULL; #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); /* some posix implementations use negative numbers to indicate the number of waiting threads */ if (sval < 0) sval = 0; return PyLong_FromLong((long)sval); #endif } /*[clinic input] _multiprocess.SemLock._is_zero Return whether semaphore has value zero. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__is_zero_impl(SemLockObject *self) /*[clinic end generated code: output=815d4c878c806ed7 input=294a446418d31347]*/ { #ifdef HAVE_BROKEN_SEM_GETVALUE if (sem_trywait(self->handle) < 0) { if (errno == EAGAIN) Py_RETURN_TRUE; return _PyMp_SetError(NULL, MP_STANDARD_ERROR); } else { if (sem_post(self->handle) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); Py_RETURN_FALSE; } #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); return PyBool_FromLong((long)sval == 0); #endif } /*[clinic input] _multiprocess.SemLock._after_fork Rezero the net acquisition count after fork(). [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__after_fork_impl(SemLockObject *self) /*[clinic end generated code: output=718bb27914c6a6c1 input=190991008a76621e]*/ { self->count = 0; Py_RETURN_NONE; } /*[clinic input] _multiprocess.SemLock.__enter__ Enter the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock___enter___impl(SemLockObject *self) /*[clinic end generated code: output=beeb2f07c858511f input=c5e27d594284690b]*/ { return _multiprocess_SemLock_acquire_impl(self, 1, Py_None); } /*[clinic input] _multiprocess.SemLock.__exit__ exc_type: object = None exc_value: object = None exc_tb: object = None / Exit the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock___exit___impl(SemLockObject *self, PyObject *exc_type, PyObject *exc_value, PyObject *exc_tb) /*[clinic end generated code: output=3b37c1a9f8b91a03 input=7d644b64a89903f8]*/ { return _multiprocess_SemLock_release_impl(self); } /* * Semaphore methods */ static PyMethodDef semlock_methods[] = { _MULTIPROCESS_SEMLOCK_ACQUIRE_METHODDEF _MULTIPROCESS_SEMLOCK_RELEASE_METHODDEF _MULTIPROCESS_SEMLOCK___ENTER___METHODDEF _MULTIPROCESS_SEMLOCK___EXIT___METHODDEF _MULTIPROCESS_SEMLOCK__COUNT_METHODDEF _MULTIPROCESS_SEMLOCK__IS_MINE_METHODDEF _MULTIPROCESS_SEMLOCK__GET_VALUE_METHODDEF _MULTIPROCESS_SEMLOCK__IS_ZERO_METHODDEF _MULTIPROCESS_SEMLOCK__REBUILD_METHODDEF _MULTIPROCESS_SEMLOCK__AFTER_FORK_METHODDEF {NULL} }; /* * Member table */ static PyMemberDef semlock_members[] = { {"handle", T_SEM_HANDLE, offsetof(SemLockObject, handle), READONLY, ""}, {"kind", T_INT, offsetof(SemLockObject, kind), READONLY, ""}, {"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY, ""}, {"name", T_STRING, offsetof(SemLockObject, name), READONLY, ""}, {NULL} }; /* * Semaphore type */ PyTypeObject _PyMp_SemLockType = { PyVarObject_HEAD_INIT(NULL, 0) /* tp_name */ "_multiprocess.SemLock", /* tp_basicsize */ sizeof(SemLockObject), /* tp_itemsize */ 0, /* tp_dealloc */ (destructor)semlock_dealloc, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_doc */ "Semaphore/Mutex type", /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ semlock_methods, /* tp_members */ semlock_members, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ _multiprocess_SemLock, }; /* * Function to unlink semaphore names */ PyObject * _PyMp_sem_unlink(const char *name) { if (SEM_UNLINK(name) < 0) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); return NULL; } Py_RETURN_NONE; } #endif // HAVE_MP_SEMAPHORE uqfoundation-multiprocess-b3457a5/py3.11/README_MODS000066400000000000000000001323461455552142400217660ustar00rootroot00000000000000cp -rf py3.10/examples . cp -rf py3.10/doc . cp -f py3.10/index.html . cp -rf py3.10/_multiprocess _multiprocess cp -rf py3.10/multiprocess multiprocess cp -rf py3.10/Modules/_multiprocess Modules/_multiprocess # ---------------------------------------------------------------------- diff Python-3.10.0rc2/Lib/multiprocessing/connection.py Python-3.11.0a1/Lib/multiprocessing/connection.py 946c946 < # Make connection and socket objects sharable if possible --- > # Make connection and socket objects shareable if possible diff Python-3.10.0rc2/Lib/multiprocessing/managers.py Python-3.11.0a1/Lib/multiprocessing/managers.py 1341d1340 < pass # ---------------------------------------------------------------------- diff Python-3.10.0rc2/Lib/test/_test_multiprocessing.py Python-3.11.0a1/Lib/test/_test_multiprocessing.py 613a614 > gc.collect() # For PyPy or other GCs. 2669a2671 > gc.collect() # For PyPy or other GCs. 
3773a3776,3781 > def _new_shm_name(self, prefix): > # Add a PID to the name of a POSIX shared memory object to allow > # running multiprocessing tests (test_multiprocessing_fork, > # test_multiprocessing_spawn, etc) in parallel. > return prefix + str(os.getpid()) > 3775c3783,3784 < sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512) --- > name_tsmb = self._new_shm_name('test01_tsmb') > sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) 3779c3788 < self.assertEqual(sms.name, 'test01_tsmb') --- > self.assertEqual(sms.name, name_tsmb) 3787,3793d3795 < # Test pickling < sms.buf[0:6] = b'pickle' < pickled_sms = pickle.dumps(sms) < sms2 = pickle.loads(pickled_sms) < self.assertEqual(sms.name, sms2.name) < self.assertEqual(bytes(sms.buf[0:6]), bytes(sms2.buf[0:6]), b'pickle') < 3799c3801 < also_sms = shared_memory.SharedMemory('test01_tsmb') --- > also_sms = shared_memory.SharedMemory(name_tsmb) 3804c3806 < same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size) --- > same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) 3822c3824 < names = ['test01_fn', 'test02_fn'] --- > names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] 3843a3846,3851 > name_dblunlink = self._new_shm_name('test01_dblunlink') > sms_uno = shared_memory.SharedMemory( > name_dblunlink, > create=True, > size=5000 > ) 3845,3850d3852 < sms_uno = shared_memory.SharedMemory( < 'test01_dblunlink', < create=True, < size=5000 < ) < 3854c3856 < sms_duo = shared_memory.SharedMemory('test01_dblunlink') --- > sms_duo = shared_memory.SharedMemory(name_dblunlink) 3866c3868 < 'test01_tsmb', --- > name_tsmb, 3880c3882 < ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb') --- > ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) 3891a3894,3916 > def test_shared_memory_recreate(self): > # Test if shared memory segment is created properly, > # when _make_filename returns an existing shared memory segment name > with unittest.mock.patch( > 'multiprocessing.shared_memory._make_filename') as mock_make_filename: > > NAME_PREFIX = shared_memory._SHM_NAME_PREFIX > names = ['test01_fn', 'test02_fn'] > # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary > # because some POSIX compliant systems require name to start with / > names = [NAME_PREFIX + name for name in names] > > mock_make_filename.side_effect = names > shm1 = shared_memory.SharedMemory(create=True, size=1) > self.addCleanup(shm1.unlink) > self.assertEqual(shm1._name, names[0]) > > mock_make_filename.side_effect = names > shm2 = shared_memory.SharedMemory(create=True, size=1) > self.addCleanup(shm2.unlink) > self.assertEqual(shm2._name, names[1]) > > def test_invalid_shared_memory_cration(self): 3903a3929,3969 > def test_shared_memory_pickle_unpickle(self): > for proto in range(pickle.HIGHEST_PROTOCOL + 1): > with self.subTest(proto=proto): > sms = shared_memory.SharedMemory(create=True, size=512) > self.addCleanup(sms.unlink) > sms.buf[0:6] = b'pickle' > > # Test pickling > pickled_sms = pickle.dumps(sms, protocol=proto) > > # Test unpickling > sms2 = pickle.loads(pickled_sms) > self.assertIsInstance(sms2, shared_memory.SharedMemory) > self.assertEqual(sms.name, sms2.name) > self.assertEqual(bytes(sms.buf[0:6]), b'pickle') > self.assertEqual(bytes(sms2.buf[0:6]), b'pickle') > > # Test that unpickled version is still the same SharedMemory > sms.buf[0:6] = b'newval' > self.assertEqual(bytes(sms.buf[0:6]), b'newval') > self.assertEqual(bytes(sms2.buf[0:6]), b'newval') > > sms2.buf[0:6] 
= b'oldval' > self.assertEqual(bytes(sms.buf[0:6]), b'oldval') > self.assertEqual(bytes(sms2.buf[0:6]), b'oldval') > > def test_shared_memory_pickle_unpickle_dead_object(self): > for proto in range(pickle.HIGHEST_PROTOCOL + 1): > with self.subTest(proto=proto): > sms = shared_memory.SharedMemory(create=True, size=512) > sms.buf[0:6] = b'pickle' > pickled_sms = pickle.dumps(sms, protocol=proto) > > # Now, we are going to kill the original object. > # So, unpickled one won't be able to attach to it. > sms.close() > sms.unlink() > > with self.assertRaises(FileNotFoundError): > pickle.loads(pickled_sms) > 4085c4151,4152 < sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate') --- > name_duplicate = self._new_shm_name('test03_duplicate') > sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) 4088c4155 < self.assertEqual('test03_duplicate', sl_copy.shm.name) --- > self.assertEqual(name_duplicate, sl_copy.shm.name) 4120,4139c4187,4222 < sl = shared_memory.ShareableList(range(10)) < self.addCleanup(sl.shm.unlink) < < serialized_sl = pickle.dumps(sl) < deserialized_sl = pickle.loads(serialized_sl) < self.assertTrue( < isinstance(deserialized_sl, shared_memory.ShareableList) < ) < self.assertTrue(deserialized_sl[-1], 9) < self.assertFalse(sl is deserialized_sl) < deserialized_sl[4] = "changed" < self.assertEqual(sl[4], "changed") < < # Verify data is not being put into the pickled representation. < name = 'a' * len(sl.shm.name) < larger_sl = shared_memory.ShareableList(range(400)) < self.addCleanup(larger_sl.shm.unlink) < serialized_larger_sl = pickle.dumps(larger_sl) < self.assertTrue(len(serialized_sl) == len(serialized_larger_sl)) < larger_sl.shm.close() --- > for proto in range(pickle.HIGHEST_PROTOCOL + 1): > with self.subTest(proto=proto): > sl = shared_memory.ShareableList(range(10)) > self.addCleanup(sl.shm.unlink) > > serialized_sl = pickle.dumps(sl, protocol=proto) > deserialized_sl = pickle.loads(serialized_sl) > self.assertIsInstance( > deserialized_sl, shared_memory.ShareableList) > self.assertEqual(deserialized_sl[-1], 9) > self.assertIsNot(sl, deserialized_sl) > > deserialized_sl[4] = "changed" > self.assertEqual(sl[4], "changed") > sl[3] = "newvalue" > self.assertEqual(deserialized_sl[3], "newvalue") > > larger_sl = shared_memory.ShareableList(range(400)) > self.addCleanup(larger_sl.shm.unlink) > serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto) > self.assertEqual(len(serialized_sl), len(serialized_larger_sl)) > larger_sl.shm.close() > > deserialized_sl.shm.close() > sl.shm.close() > > def test_shared_memory_ShareableList_pickling_dead_object(self): > for proto in range(pickle.HIGHEST_PROTOCOL + 1): > with self.subTest(proto=proto): > sl = shared_memory.ShareableList(range(10)) > serialized_sl = pickle.dumps(sl, protocol=proto) > > # Now, we are going to kill the original object. > # So, unpickled one won't be able to attach to it. > sl.shm.close() > sl.shm.unlink() 4141,4142c4224,4225 < deserialized_sl.shm.close() < sl.shm.close() --- > with self.assertRaises(FileNotFoundError): > pickle.loads(serialized_sl) 4178a4262,4268 > # Without this line it was raising warnings like: > # UserWarning: resource_tracker: > # There appear to be 1 leaked shared_memory > # objects to clean up at shutdown > # See: https://bugs.python.org/issue45209 > resource_tracker.unregister(f"/{name}", "shared_memory") > 4188c4278 < # --- > # Test to verify that `Finalize` works. 4199a4290 > gc.collect() # For PyPy or other GCs. 4210a4302 > gc.collect() # For PyPy or other GCs. 
4216a4309 > gc.collect() # For PyPy or other GCs. g # ---------------------------------------------------------------------- diff Python-3.11.0a1/Lib/test/_test_multiprocessing.py py3.11/multiprocess/tests/__init__.py 3821c3825 < 'multiprocessing.shared_memory._make_filename') as mock_make_filename: --- > 'multiprocess.shared_memory._make_filename') as mock_make_filename: 3898c3902 < 'multiprocessing.shared_memory._make_filename') as mock_make_filename: --- > 'multiprocess.shared_memory._make_filename') as mock_make_filename: # ---------------------------------------------------------------------- diff Python-3.11.0a1/Modules/_multiprocessing/multiprocessing.c Python-3.11.0a3/Modules/_multiprocessing/multiprocessing.c 189,190c189 < #if defined(MS_WINDOWS) || \ < (defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED)) --- > #ifdef HAVE_MP_SEMAPHORE diff Python-3.11.0a1/Modules/_multiprocessing/multiprocessing.h Python-3.11.0a3/Modules/_multiprocessing/multiprocessing.h 23a24 > # define HAVE_MP_SEMAPHORE 26a28 > # define HAVE_MP_SEMAPHORE diff Python-3.11.0a1/Modules/_multiprocessing/semaphore.c Python-3.11.0a3/Modules/_multiprocessing/semaphore.c 11a12,13 > #ifdef HAVE_MP_SEMAPHORE > 796a799,800 > > #endif // HAVE_MP_SEMAPHORE # ---------------------------------------------------------------------- cp Python-3.11.0a3/Modules/_multiprocessing/clinic/* py3.11/Modules/_multiprocess/clinic # ---------------------------------------------------------------------- diff Python-3.11.0a3/Lib/multiprocessing/synchronize.py Python-3.11.0a4/Lib/multiprocessing/synchronize.py 355a356,358 > def __repr__(self) -> str: > set_status = 'set' if self.is_set() else 'unset' > return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>" diff Python-3.11.0a3/Lib/test/_test_multiprocessing.py Python-3.11.0a4/Lib/test/_test_multiprocessing.py 1648c1648,1661 < # --- > def test_repr(self) -> None: > event = self.Event() > if self.TYPE == 'processes': > self.assertRegex(repr(event), r"") > event.set() > self.assertRegex(repr(event), r"") > event.clear() > self.assertRegex(repr(event), r"") > elif self.TYPE == 'manager': > self.assertRegex(repr(event), r" event.set() > self.assertRegex(repr(event), r" > # ---------------------------------------------------------------------- diff Python-3.11.0a4/Lib/multiprocessing/managers.py Python-3.11.0a5/Lib/multiprocessing/managers.py 52,56c52,56 < if view_types[0] is not list: # only needed in Py3.0 < def rebuild_as_list(obj): < return list, (list(obj),) < for view_type in view_types: < reduction.register(view_type, rebuild_as_list) --- > def rebuild_as_list(obj): > return list, (list(obj),) > for view_type in view_types: > reduction.register(view_type, rebuild_as_list) > del view_type, view_types diff Python-3.11.0a4/Lib/multiprocessing/process.py Python-3.11.0a5/Lib/multiprocessing/process.py 429a430 > del name, signum # ---------------------------------------------------------------------- diff Python-3.11.0a5/Lib/test/_test_multiprocessing.py Python-3.11.0a7/Lib/test/_test_multiprocessing.py 75a76,81 > if support.check_sanitizer(address=True): > # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to > # work around a libasan race condition: dead lock in pthread_create(). > raise unittest.SkipTest("libasan has a pthread_create() dead lock") > > 249a256,279 > def test_args_argument(self): > # bpo-45735: Using list or tuple as *args* in constructor could > # achieve the same effect. 
> args_cases = (1, "str", [1], (1,)) > args_types = (list, tuple) > > test_cases = itertools.product(args_cases, args_types) > > for args, args_type in test_cases: > with self.subTest(args=args, args_type=args_type): > q = self.Queue(1) > # pass a tuple or list as args > p = self.Process(target=self._test_args, args=args_type((q, args))) > p.daemon = True > p.start() > child_args = q.get() > self.assertEqual(child_args, args) > p.join() > close_queue(q) > > @classmethod > def _test_args(cls, q, arg): > q.put(arg) > # ---------------------------------------------------------------------- diff Python-3.11.0a7/Lib/multiprocessing/connection.py Python-3.11.0b1/Lib/multiprocessing/connection.py 191d190 < # HACK for byte-indexing of non-bytewise buffers (e.g. array.array) 193,194c192,193 < m = memoryview(bytes(m)) < n = len(m) --- > m = m.cast('B') > n = m.nbytes Common subdirectories: Python-3.11.0a7/Lib/multiprocessing/dummy and Python-3.11.0b1/Lib/multiprocessing/dummy diff Python-3.11.0a7/Lib/multiprocessing/managers.py Python-3.11.0b1/Lib/multiprocessing/managers.py 500c500 < ctx=None): --- > ctx=None, *, shutdown_timeout=1.0): 509a510 > self._shutdown_timeout = shutdown_timeout 573,574c574,575 < args=(self._process, self._address, self._authkey, < self._state, self._Client), --- > args=(self._process, self._address, self._authkey, self._state, > self._Client, self._shutdown_timeout), 659c660,661 < def _finalize_manager(process, address, authkey, state, _Client): --- > def _finalize_manager(process, address, authkey, state, _Client, > shutdown_timeout): 674c676 < process.join(timeout=1.0) --- > process.join(timeout=shutdown_timeout) 680c682 < process.join(timeout=0.1) --- > process.join(timeout=shutdown_timeout) 682a685,686 > process.kill() > process.join() diff Python-3.11.0a7/Lib/multiprocessing/queues.py Python-3.11.0b1/Lib/multiprocessing/queues.py 142,148c142,145 < try: < self._reader.close() < finally: < close = self._close < if close: < self._close = None < close() --- > close = self._close > if close: > self._close = None > close() 172,173c169,171 < self._wlock, self._writer.close, self._ignore_epipe, < self._on_queue_feeder_error, self._sem), --- > self._wlock, self._reader.close, self._writer.close, > self._ignore_epipe, self._on_queue_feeder_error, > self._sem), 214,215c212,213 < def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe, < onerror, queue_sem): --- > def _feed(buffer, notempty, send_bytes, writelock, reader_close, > writer_close, ignore_epipe, onerror, queue_sem): 241c239,240 < close() --- > reader_close() > writer_close() diff Python-3.11.0a7/Lib/multiprocessing/spawn.py Python-3.11.0b1/Lib/multiprocessing/spawn.py 36,40d35 < if WINSERVICE: < _python_exe = os.path.join(sys.exec_prefix, 'python.exe') < else: < _python_exe = sys.executable < 43c38,41 < _python_exe = exe --- > if sys.platform == 'win32': > _python_exe = os.fsdecode(exe) > else: > _python_exe = os.fsencode(exe) 47a46,50 > if WINSERVICE: > set_executable(os.path.join(sys.exec_prefix, 'python.exe')) > else: > set_executable(sys.executable) > 89c92,93 < return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] --- > exe = get_executable() > return [exe] + opts + ['-c', prog, '--multiprocessing-fork'] diff Python-3.11.0a7/Lib/multiprocessing/util.py Python-3.11.0b1/Lib/multiprocessing/util.py 123c123 < raise TypeError('address type of {address!r} unrecognized') --- > raise TypeError(f'address type of {address!r} unrecognized') 448a449 > import subprocess 453c454 < args, 
[os.fsencode(path)], True, passfds, None, None, --- > args, [path], True, passfds, None, None, 455c456,457 < False, False, None, None, None, -1, None) --- > False, False, -1, None, None, None, -1, None, > subprocess._USE_VFORK) # ---------------------------------------------------------------------- diff Python-3.11.0b1/Lib/multiprocessing/context.py Python-3.11.0b5/Lib/multiprocessing/context.py 225a226,229 > @staticmethod > def _after_fork(): > return _default_context.get_context().Process._after_fork() > 285a290,294 > @staticmethod > def _after_fork(): > # process is spawned, nothing to do > pass > 328a338,342 > @staticmethod > def _after_fork(): > # process is spawned, nothing to do > pass > Common subdirectories: Python-3.11.0b1/Lib/multiprocessing/dummy and Python-3.11.0b5/Lib/multiprocessing/dummy diff Python-3.11.0b1/Lib/multiprocessing/pool.py Python-3.11.0b5/Lib/multiprocessing/pool.py 205a206,208 > if maxtasksperchild is not None: > if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: > raise ValueError("maxtasksperchild must be a positive int or None") diff Python-3.11.0b1/Lib/multiprocessing/process.py Python-3.11.0b5/Lib/multiprocessing/process.py 307,308c307 < util._finalizer_registry.clear() < util._run_after_forkers() --- > self._after_fork() 338a338,344 > @staticmethod > def _after_fork(): > from . import util > util._finalizer_registry.clear() > util._run_after_forkers() > > diff Python-3.11.0b1/Lib/multiprocessing/shared_memory.py Python-3.11.0b5/Lib/multiprocessing/shared_memory.py 25a26 > from . import resource_tracker 119,120c120 < from .resource_tracker import register < register(self._name, "shared_memory") --- > resource_tracker.register(self._name, "shared_memory") 240d239 < from .resource_tracker import unregister 242c241 < unregister(self._name, "shared_memory") --- > resource_tracker.unregister(self._name, "shared_memory") diff Python-3.11.0b1/Modules/_multiprocessing/semaphore.c Python-3.11.0b5/Modules/_multiprocessing/semaphore.c 457,459c457 < SemLockObject *self; < < self = PyObject_New(SemLockObject, type); --- > SemLockObject *self = (SemLockObject *)type->tp_alloc(type, 0); 576c574 < PyObject_Free(self); --- > Py_TYPE(self)->tp_free((PyObject*)self); diff Python-3.11.0b1/Lib/test/_test_multiprocessing.py Python-3.11.0b5/Lib/test/_test_multiprocessing.py 7a8 > import textwrap 2862a2864,2868 > def test_pool_maxtasksperchild_invalid(self): > for value in [0, -1, 0.5, "12"]: > with self.assertRaises(ValueError): > multiprocessing.Pool(3, maxtasksperchild=value) > 3968c3974 < names = ['test01_fn', 'test02_fn'] --- > names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')] 5762a5769,5797 > class TestNamedResource(unittest.TestCase): > def test_global_named_resource_spawn(self): > # > # gh-90549: Check that global named resources in main module > # will not leak by a subprocess, in spawn context. 
> # > testfn = os_helper.TESTFN > self.addCleanup(os_helper.unlink, testfn) > with open(testfn, 'w', encoding='utf-8') as f: > f.write(textwrap.dedent('''\ > import multiprocessing as mp > > ctx = mp.get_context('spawn') > > global_resource = ctx.Semaphore() > > def submain(): pass > > if __name__ == '__main__': > p = ctx.Process(target=submain) > p.start() > p.join() > ''')) > rc, out, err = test.support.script_helper.assert_python_ok(testfn) > # on error, err = 'UserWarning: resource_tracker: There appear to > # be 1 leaked semaphore objects to clean up at shutdown' > self.assertEqual(err, b'') > > 5993a6029,6040 > > > @unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available') > @unittest.skipIf(sys.platform != "linux", "Linux only") > class SemLockTests(unittest.TestCase): > > def test_semlock_subclass(self): > class SemLock(_multiprocessing.SemLock): > pass > name = f'test_semlock_subclass-{os.getpid()}' > s = SemLock(1, 0, 10, name, 0) > _multiprocessing.sem_unlink(name) # ---------------------------------------------------------------------- diff Python-3.11.0b5/Lib/multiprocessing/connection.py Python-3.11.0/Lib/multiprocessing/connection.py 76,80d75 < # Prefer abstract sockets if possible to avoid problems with the address < # size. When coding portable applications, some implementations have < # sun_path as short as 92 bytes in the sockaddr_un struct. < if util.abstract_sockets_supported: < return f"\0listener-{os.getpid()}-{next(_mmap_counter)}" diff Python-3.11.0b5/Lib/multiprocessing/popen_spawn_win32.py Python-3.11.0/Lib/multiprocessing/popen_spawn_win32.py 57d56 < cmd = ' '.join('"%s"' % x for x in cmd) 64c63 < python_exe = sys._base_executable --- > cmd[0] = python_exe = sys._base_executable 69a69,70 > cmd = ' '.join('"%s"' % x for x in cmd) > # ---------------------------------------------------------------------- cp Python-3.11.0/Modules/_multiprocessing/clinic/* Modules/_multiprocess/clinic/ # ---------------------------------------------------------------------- diff Python-3.11.0/Lib/multiprocessing/resource_tracker.py Python-3.11.1/Lib/multiprocessing/resource_tracker.py 164c164 < if len(name) > 512: --- > if len(msg) > 512: 167c167 < raise ValueError('name too long') --- > raise ValueError('msg too long') diff Python-3.11.0/Lib/multiprocessing/shared_memory.py Python-3.11.1/Lib/multiprocessing/shared_memory.py 176c176,179 < size = _winapi.VirtualQuerySize(p_buf) --- > try: > size = _winapi.VirtualQuerySize(p_buf) > finally: > _winapi.UnmapViewOfFile(p_buf) diff Python-3.11.0/Lib/test/_test_multiprocessing.py Python-3.11.1/Lib/test/_test_multiprocessing.py 5440a5441,5448 > def test_too_long_name_resource(self): > # gh-96819: Resource names that will make the length of a write to a pipe > # greater than PIPE_BUF are not allowed > rtype = "shared_memory" > too_long_name_resource = "a" * (512 - len(rtype)) > with self.assertRaises(ValueError): > resource_tracker.register(too_long_name_resource, rtype) > 6039c6047 < s = SemLock(1, 0, 10, name, 0) --- > s = SemLock(1, 0, 10, name, False) # ---------------------------------------------------------------------- diff Python-3.11.1/Lib/multiprocessing/process.py Python-3.11.4/Lib/multiprocessing/process.py 64c64 < if p._popen.poll() is not None: --- > if (child_popen := p._popen) and child_popen.poll() is not None: # ---------------------------------------------------------------------- diff Python-3.11.4/Lib/multiprocessing/forkserver.py Python-3.11.5/Lib/multiprocessing/forkserver.py 64c64 < if not 
all(type(mod) is str for mod in self._preload_modules): --- > if not all(type(mod) is str for mod in modules_names): diff Python-3.11.4/Lib/multiprocessing/spawn.py Python-3.11.5/Lib/multiprocessing/spawn.py 34c34 < WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") --- > WINSERVICE = sys.executable and sys.executable.lower().endswith("pythonservice.exe") 38c38,40 < if sys.platform == 'win32': --- > if exe is None: > _python_exe = exe > elif sys.platform == 'win32': 151c153,157 < is not going to be frozen to produce an executable.''') --- > is not going to be frozen to produce an executable. > > To fix this issue, refer to the "Safe importing of main module" > section in https://docs.python.org/3/library/multiprocessing.html > ''') diff Python-3.11.4/Lib/multiprocessing/synchronize.py Python-3.11.5/Lib/multiprocessing/synchronize.py 53,54c53,54 < name = ctx.get_start_method() < unlink_now = sys.platform == 'win32' or name == 'fork' --- > self.is_fork_ctx = ctx.get_start_method() == 'fork' > unlink_now = sys.platform == 'win32' or self.is_fork_ctx 105a106,110 > if self.is_fork_ctx: > raise RuntimeError('A SemLock created in a fork context is being ' > 'shared with a process in a spawn context. This is ' > 'not supported. Please use the same context to create ' > 'multiprocessing objects and Process.') # ---------------------------------------------------------------------- diff Python-3.11.4/Lib/test/_test_multiprocessing.py Python-3.11.5/Lib/test/_test_multiprocessing.py 15a16 > import functools 33a35 > from test.support import script_helper 172a175,227 > def only_run_in_spawn_testsuite(reason): > """Returns a decorator: raises SkipTest when SM != spawn at test time. > > This can be useful to save overall Python test suite execution time. > "spawn" is the universal mode available on all platforms so this limits the > decorated test to only execute within test_multiprocessing_spawn. > > This would not be necessary if we refactored our test suite to split things > into other test files when they are not start method specific to be rerun > under all start methods. > """ > > def decorator(test_item): > > @functools.wraps(test_item) > def spawn_check_wrapper(*args, **kwargs): > if (start_method := multiprocessing.get_start_method()) != "spawn": > raise unittest.SkipTest(f"{start_method=}, not 'spawn'; {reason}") > return test_item(*args, **kwargs) > > return spawn_check_wrapper > > return decorator > > > class TestInternalDecorators(unittest.TestCase): > """Logic within a test suite that could errantly skip tests? 
Test it!""" > > @unittest.skipIf(sys.platform == "win32", "test requires that fork exists.") > def test_only_run_in_spawn_testsuite(self): > if multiprocessing.get_start_method() != "spawn": > raise unittest.SkipTest("only run in test_multiprocessing_spawn.") > > try: > @only_run_in_spawn_testsuite("testing this decorator") > def return_four_if_spawn(): > return 4 > except Exception as err: > self.fail(f"expected decorated `def` not to raise; caught {err}") > > orig_start_method = multiprocessing.get_start_method(allow_none=True) > try: > multiprocessing.set_start_method("spawn", force=True) > self.assertEqual(return_four_if_spawn(), 4) > multiprocessing.set_start_method("fork", force=True) > with self.assertRaises(unittest.SkipTest) as ctx: > return_four_if_spawn() > self.assertIn("testing this decorator", str(ctx.exception)) > self.assertIn("start_method=", str(ctx.exception)) > finally: > multiprocessing.set_start_method(orig_start_method, force=True) > > 5240a5296,5303 > def test_context_check_module_types(self): > try: > ctx = multiprocessing.get_context('forkserver') > except ValueError: > raise unittest.SkipTest('forkserver should be available') > with self.assertRaisesRegex(TypeError, 'module_names must be a list of strings'): > ctx.set_forkserver_preload([1, 2, 3]) > 5284a5348,5369 > @unittest.skipIf(sys.platform == "win32", > "Only Spawn on windows so no risk of mixing") > @only_run_in_spawn_testsuite("avoids redundant testing.") > def test_mixed_startmethod(self): > # Fork-based locks cannot be used with spawned process > for process_method in ["spawn", "forkserver"]: > queue = multiprocessing.get_context("fork").Queue() > process_ctx = multiprocessing.get_context(process_method) > p = process_ctx.Process(target=close_queue, args=(queue,)) > err_msg = "A SemLock created in a fork" > with self.assertRaisesRegex(RuntimeError, err_msg): > p.start() > > # non-fork-based locks can be used with all other start methods > for queue_method in ["spawn", "forkserver"]: > for process_method in multiprocessing.get_all_start_methods(): > queue = multiprocessing.get_context(queue_method).Queue() > process_ctx = multiprocessing.get_context(process_method) > p = process_ctx.Process(target=close_queue, args=(queue,)) > p.start() > p.join() > 5777a5863 > @only_run_in_spawn_testsuite("spawn specific test.") 5788d5873 < 5790d5874 < 5792d5875 < 5794d5876 < 5800c5882 < rc, out, err = test.support.script_helper.assert_python_ok(testfn) --- > rc, out, err = script_helper.assert_python_ok(testfn) 5803c5885 < self.assertEqual(err, b'') --- > self.assertFalse(err, msg=err.decode('utf-8')) 5811a5894,5911 > @only_run_in_spawn_testsuite("avoids redundant testing.") > def test_spawn_sys_executable_none_allows_import(self): > # Regression test for a bug introduced in > # https://github.com/python/cpython/issues/90876 that caused an > # ImportError in multiprocessing when sys.executable was None. > # This can be true in embedded environments. > rc, out, err = script_helper.assert_python_ok( > "-c", > """if 1: > import sys > sys.executable = None > assert "multiprocessing" not in sys.modules, "already imported!" 
> import multiprocessing > import multiprocessing.spawn # This should not fail\n""", > ) > self.assertEqual(rc, 0) > self.assertFalse(err, msg=err.decode('utf-8')) > # ---------------------------------------------------------------------- diff Python-3.11.5/Modules/_multiprocessing/semaphore.c Python-3.11.6/Modules/_multiprocessing/semaphore.c 519,521d518 < if (handle != SEM_FAILED) < SEM_CLOSE(handle); < PyMem_Free(name_copy); 524a522,524 > if (handle != SEM_FAILED) > SEM_CLOSE(handle); > PyMem_Free(name_copy); 558a559 > PyErr_SetFromErrno(PyExc_OSError); 560c561 < return PyErr_SetFromErrno(PyExc_OSError); --- > return NULL; diff Python-3.11.5/Lib/multiprocessing/connection.py Python-3.11.6/Lib/multiprocessing/connection.py 11a12 > import errno 273a275 > _send_ov = None 275a278,281 > ov = self._send_ov > if ov is not None: > # Interrupt WaitForMultipleObjects() in _send_bytes() > ov.cancel() 278a285,288 > if self._send_ov is not None: > # A connection should only be used by a single thread > raise ValueError("concurrent send_bytes() calls " > "are not supported") 279a290 > self._send_ov = ov 288a300 > self._send_ov = None 289a302,306 > if err == _winapi.ERROR_OPERATION_ABORTED: > # close() was called by another thread while > # WaitForMultipleObjects() was waiting for the overlapped > # operation. > raise OSError(errno.EPIPE, "handle is closed") diff Python-3.11.5/Lib/multiprocessing/popen_spawn_win32.py Python-3.11.6/Lib/multiprocessing/popen_spawn_win32.py 16a17 > # Exit code used by Popen.terminate() 125,126c126,130 < except OSError: < if self.wait(timeout=1.0) is None: --- > except PermissionError: > # ERROR_ACCESS_DENIED (winerror 5) is received when the > # process already died. > code = _winapi.GetExitCodeProcess(int(self._handle)) > if code == _winapi.STILL_ACTIVE: 127a132,134 > self.returncode = code > else: > self.returncode = -signal.SIGTERM diff Python-3.11.5/Lib/multiprocessing/resource_tracker.py Python-3.11.6/Lib/multiprocessing/resource_tracker.py 53a54,57 > class ReentrantCallError(RuntimeError): > pass > > 57c61 < self._lock = threading.Lock() --- > self._lock = threading.RLock() 60a65,72 > def _reentrant_call_error(self): > # gh-109629: this happens if an explicit call to the ResourceTracker > # gets interrupted by a garbage collection, invoking a finalizer (*) > # that itself calls back into ResourceTracker. > # (*) for example the SemLock finalizer > raise ReentrantCallError( > "Reentrant call into the multiprocessing resource tracker") > 62a75,78 > # This should not happen (_stop() isn't called by a finalizer) > # but we check for it anyway. > if self._lock._recursion_count() > 1: > return self._reentrant_call_error() 83a100,102 > if self._lock._recursion_count() > 1: > # The code below is certainly not reentrant-safe, so bail out > return self._reentrant_call_error() 162c181,191 < self.ensure_running() --- > try: > self.ensure_running() > except ReentrantCallError: > # The code below might or might not work, depending on whether > # the resource tracker was already running and still alive. > # Better warn the user. > # (XXX is warnings.warn itself reentrant-safe? :-) > warnings.warn( > f"ResourceTracker called reentrantly for resource cleanup, " > f"which is unsupported. 
" > f"The {rtype} object {name!r} might leak.") 178a208 > diff Python-3.11.5/Lib/multiprocessing/synchronize.py Python-3.11.6/Lib/multiprocessing/synchronize.py 53,54c53,54 < self.is_fork_ctx = ctx.get_start_method() == 'fork' < unlink_now = sys.platform == 'win32' or self.is_fork_ctx --- > self._is_fork_ctx = ctx.get_start_method() == 'fork' > unlink_now = sys.platform == 'win32' or self._is_fork_ctx 106c106 < if self.is_fork_ctx: --- > if self._is_fork_ctx: 117a118,119 > # Ensure that deserialized SemLock can be serialized again (gh-108520). > self._is_fork_ctx = False diff Python-3.11.5/Lib/test/_test_multiprocessing.py Python-3.11.6/Lib/test/_test_multiprocessing.py 81c81 < # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to --- > # gh-89363: Skip multiprocessing tests if Python is built with ASAN to 330a331 > @support.requires_resource('cpu') 558,559c559 < if os.name != 'nt': < self.assertEqual(exitcode, -signal.SIGTERM) --- > self.assertEqual(exitcode, -signal.SIGTERM) 564a565,566 > else: > self.assertEqual(exitcode, -signal.SIGTERM) 675a678 > @support.requires_resource('walltime') 4472a4476 > @support.requires_resource('cpu') 4919a4924 > @support.requires_resource('walltime') 4947a4953 > @support.requires_resource('walltime') 5369a5376,5403 > @classmethod > def _put_one_in_queue(cls, queue): > queue.put(1) > > @classmethod > def _put_two_and_nest_once(cls, queue): > queue.put(2) > process = multiprocessing.Process(target=cls._put_one_in_queue, args=(queue,)) > process.start() > process.join() > > def test_nested_startmethod(self): > # gh-108520: Regression test to ensure that child process can send its > # arguments to another process > queue = multiprocessing.Queue() > > process = multiprocessing.Process(target=self._put_two_and_nest_once, args=(queue,)) > process.start() > process.join() > > results = [] > while not queue.empty(): > results.append(queue.get()) > > # gh-109706: queue.put(1) can write into the queue before queue.put(2), > # there is no synchronization in the test. 
> self.assertSetEqual(set(results), set([2, 1])) > 6052c6086,6087 < def install_tests_in_module_dict(remote_globs, start_method): --- > def install_tests_in_module_dict(remote_globs, start_method, > only_type=None, exclude_types=False): 6064a6100,6103 > if only_type and type_ != only_type: > continue > if exclude_types: > continue 6074a6114,6116 > if only_type: > continue > # ---------------------------------------------------------------------- diff Python-3.11.6/Lib/test/_test_multiprocessing.py Python-3.11.7/Lib/test/_test_multiprocessing.py 80c80 < if support.check_sanitizer(address=True): --- > if support.HAVE_ASAN_FORK_BUG: 83c83,88 < raise unittest.SkipTest("libasan has a pthread_create() dead lock") --- > raise unittest.SkipTest("libasan has a pthread_create() dead lock related to thread+fork") > > > # gh-110666: Tolerate a difference of 100 ms when comparing timings > # (clock resolution) > CLOCK_RES = 0.100 1653c1658 < expected = 0.1 --- > expected = 0.100 1657,1658c1662 < # borrow logic in assertTimeout() from test/lock_tests.py < if not result and expected * 0.6 < dt < expected * 10.0: --- > if not result and (expected - CLOCK_RES) <= dt: 1677c1681 < time.sleep(0.01) --- > time.sleep(0.010) 2436,2437c2440,2444 < def sqr(x, wait=0.0): < time.sleep(wait) --- > def sqr(x, wait=0.0, event=None): > if event is None: > time.sleep(wait) > else: > event.wait(wait) 2576,2579c2583,2594 < res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) < get = TimingWrapper(res.get) < self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) < self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) --- > p = self.Pool(3) > try: > event = threading.Event() if self.TYPE == 'threads' else None > res = p.apply_async(sqr, (6, TIMEOUT2 + support.SHORT_TIMEOUT, event)) > get = TimingWrapper(res.get) > self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) > self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) > finally: > if event is not None: > event.set() > p.terminate() > p.join() 2680,2687c2695,2700 < result = self.pool.map_async( < time.sleep, [0.1 for i in range(10000)], chunksize=1 < ) < self.pool.terminate() < join = TimingWrapper(self.pool.join) < join() < # Sanity check the pool didn't wait for all tasks to finish < self.assertLess(join.elapsed, 2.0) --- > # Simulate slow tasks which take "forever" to complete > p = self.Pool(3) > args = [support.LONG_TIMEOUT for i in range(10_000)] > result = p.map_async(time.sleep, args, chunksize=1) > p.terminate() > p.join() 4379,4383c4392,4394 < deadline = time.monotonic() + support.LONG_TIMEOUT < t = 0.1 < while time.monotonic() < deadline: < time.sleep(t) < t = min(t*2, 5) --- > err_msg = ("A SharedMemory segment was leaked after " > "a process was abruptly terminated") > for _ in support.sleeping_retry(support.LONG_TIMEOUT, err_msg): 4388,4390d4398 < else: < raise AssertionError("A SharedMemory segment was leaked after" < " a process was abruptly terminated.") 4839c4847 < time.sleep(random.random()*0.1) --- > time.sleep(random.random() * 0.100) 4879c4887 < time.sleep(random.random()*0.1) --- > time.sleep(random.random() * 0.100) 4928c4936 < expected = 5 --- > timeout = 5.0 # seconds 4932c4940 < res = wait([a, b], expected) --- > res = wait([a, b], timeout) 4936,4937c4944 < self.assertLess(delta, expected * 2) < self.assertGreater(delta, expected * 0.5) --- > self.assertGreater(delta, timeout - CLOCK_RES) 4940,4941d4946 < < start = time.monotonic() 4943,4944d4947 < delta = time.monotonic() - start < 4946d4948 < 
self.assertLess(delta, 0.4) 5461,5463c5463,5466 < deadline = time.monotonic() + support.LONG_TIMEOUT < while time.monotonic() < deadline: < time.sleep(.5) --- > err_msg = (f"A {rtype} resource was leaked after a process was " > f"abruptly terminated") > for _ in support.sleeping_retry(support.SHORT_TIMEOUT, > err_msg): 5471,5474c5474 < else: < raise AssertionError( < f"A {rtype} resource was leaked after a process was " < f"abruptly terminated.") --- > 5709a5710 > 5711,5720c5712,5713 < t = 0.01 < while len(multiprocessing.active_children()) > 1: < time.sleep(t) < t *= 2 < dt = time.monotonic() - start_time < if dt >= 5.0: < test.support.environment_altered = True < support.print_warning(f"multiprocessing.Manager still has " < f"{multiprocessing.active_children()} " < f"active children after {dt} seconds") --- > for _ in support.sleeping_retry(5.0, error=False): > if len(multiprocessing.active_children()) <= 1: 5721a5715,5720 > else: > dt = time.monotonic() - start_time > support.environment_altered = True > support.print_warning(f"multiprocessing.Manager still has " > f"{multiprocessing.active_children()} " > f"active children after {dt:.1f} seconds") 6034,6043c6033,6034 < t = 0.01 < while len(multiprocessing.active_children()) > 1: < time.sleep(t) < t *= 2 < dt = time.monotonic() - start_time < if dt >= 5.0: < test.support.environment_altered = True < support.print_warning(f"multiprocessing.Manager still has " < f"{multiprocessing.active_children()} " < f"active children after {dt} seconds") --- > for _ in support.sleeping_retry(5.0, error=False): > if len(multiprocessing.active_children()) <= 1: 6044a6036,6041 > else: > dt = time.monotonic() - start_time > support.environment_altered = True > support.print_warning(f"multiprocessing.Manager still has " > f"{multiprocessing.active_children()} " > f"active children after {dt:.1f} seconds") uqfoundation-multiprocess-b3457a5/py3.11/_multiprocess/000077500000000000000000000000001455552142400231435ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/_multiprocess/__init__.py000066400000000000000000000005011455552142400252500ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE from _multiprocessing import * uqfoundation-multiprocess-b3457a5/py3.11/doc/000077500000000000000000000000001455552142400210205ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/doc/CHANGES.html000066400000000000000000001133431455552142400227630ustar00rootroot00000000000000 Changelog for processing

Changelog for processing

Changes in 0.52

  • On versions 0.50 and 0.51 Mac OSX Lock.release() would fail with OSError(errno.ENOSYS, "[Errno 78] Function not implemented"). This appears to be because on Mac OSX sem_getvalue() has not been implemented.

    Now sem_getvalue() is no longer needed. Unfortunately, however, on Mac OSX BoundedSemaphore() will not raise ValueError if it exceeds its initial value.

  • Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs.

  • The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias.

Changes in 0.51

  • In 0.50 processing.Value() and processing.sharedctypes.Value() were related but had different signatures, which was rather confusing.

    Now processing.sharedctypes.Value() has been renamed processing.sharedctypes.RawValue() and processing.sharedctypes.Value() is the same as processing.Value().

  • In version 0.50 sendfd() and recvfd() apparently did not work on 64bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40.

    However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of sendfd() and recvfd() by setting macros['HAVE_FD_TRANSFER'] = 0 in setup.py.

  • Fixed an authentication error when using a "remote" manager created using BaseManager.from_address().

  • Fixed a couple of bugs which only affected Python 2.4.

Changes in 0.50

  • ctypes is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately.

  • LocalManager() has been removed.

  • Added processing.Value() and processing.Array() which are similar to LocalManager.SharedValue() and LocalManager.SharedArray().

  • In the sharedctypes module new_value() and new_array() have been renamed Value() and Array().

  • Process.stop(), Process.getStoppable() and Process.setStoppable() have been removed. Use Process.terminate() instead.

  • processing.Lock now matches threading.Lock behaviour more closely: a thread can now release a lock it does not own, and when a thread tries to acquire a lock it already owns, a deadlock results instead of an exception.

  • On Windows when the main thread is blocking on a method of Lock, RLock, Semaphore, BoundedSemaphore, Condition it will no longer ignore Ctrl-C. (The same was already true on Unix.)

    This differs from the behaviour of the equivalent objects in threading which will completely ignore Ctrl-C.

  • The test sub-package has been replaced by lots of unit tests in a tests sub-package. Some of the old test files have been moved over to a new examples sub-package.

  • On Windows it is now possible for a non-console python program (i.e. one using pythonw.exe instead of python.exe) to use processing.

    Previously an exception was raised when subprocess.py tried to duplicate stdin, stdout, stderr.

  • Proxy objects should now be thread safe -- they now use thread local storage.

  • Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise RuntimeError with a message saying that the object should only be shared between processes using inheritance.

    Previously, this worked unreliably on Windows but would fail with an unexplained AssertionError on Unix.

  • The names of some of the macros used for compiling the extension have changed. See INSTALL.txt and setup.py.

  • A few changes which (hopefully) make compilation possible on Solaris.

  • Lots of refactoring of the code.

  • Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux).

Changes in 0.40

  • Removed SimpleQueue and PosixQueue types. Just use Queue instead.

  • Previously if you forgot to use the

    if __name__ == '__main__':
        freezeSupport()
        ...
    

    idiom on Windows then processes could be created recursively bringing the computer to its knees. Now RuntimeError will be raised instead.

  • Some refactoring of the code.

  • A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed.

Changes in 0.39

  • One can now create one-way pipes by doing reader, writer = Pipe(duplex=False).

  • Rewrote code for managing shared memory maps.

  • Added a sharedctypes module for creating ctypes objects allocated from shared memory. On Python 2.4 this requires the installation of ctypes.

    ctypes objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a LocalManager.

  • Rearranged documentation.

  • Previously the C extension caused a segfault on 64 bit machines with Python 2.5 because it used int instead of Py_ssize_t in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report.

  • A fix for Pool.terminate().

  • A fix for cleanup behaviour of Queue.

Changes in 0.38

  • Have revamped the queue types. Now the queue types are Queue, SimpleQueue and (on systems which support it) PosixQueue.

    Now Queue should behave just like Python's normal Queue.Queue class except that qsize(), task_done() and join() are not implemented. In particular, if no maximum size was specified when the queue was created then put() will always succeed without blocking.

    A SimpleQueue instance is really just a pipe protected by a couple of locks. It has get(), put() and empty() methods but does not support timeouts or non-blocking calls.

    BufferedPipeQueue() and PipeQueue() remain as deprecated aliases of Queue() but BufferedPosixQueue() has been removed. (Not sure if we really need to keep PosixQueue()...)

  • Previously the Pool.shutdown() method was a little dodgy -- it could block indefinitely if map() or imap*() were used and did not try to terminate workers while they were doing a task.

    Now there are three new methods close(), terminate() and join() -- shutdown() is retained as a deprecated alias of terminate(). Thanks to Gerald John M. Manipon for feature request/suggested patch to shutdown().

  • Pool.imap() and Pool.imap_unordered() has gained a chunksize argument which allows the iterable to be submitted to the pool in chunks. Choosing chunksize appropriately makes Pool.imap() almost as fast as Pool.map() even for long iterables and cheap functions.

  • Previously on Windows, when the cleanup code for a LocalManager attempted to unlink the name of the file which backs the shared memory map, an exception was raised if a child process still existed which had a handle open for that mmap. This is likely to happen if a daemon process inherits a LocalManager instance.

    Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin.

  • types.MethodType is registered with copy_reg so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.)

    This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object.

  • On Windows reduction.fromfd() now returns true instances of _socket.socket, so there is no more need for the _processing.falsesocket type.

Changes in 0.37

  • Updated metadata and documentation because the project is now hosted at developer.berlios.de/projects/pyprocessing.
  • The Pool.join() method has been removed. Pool.shutdown() will now join the worker processes automatically.
  • A pool object no longer participates in a reference cycle so Pool.shutdown() should get called as soon as its reference count falls to zero.
  • On Windows if enableLogging() was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, specifying a handler type in enableLogging() will now cause any previous handlers used by the logger to be discarded.

Changes in 0.36

  • In recent versions on Unix the finalizers in a manager process were never given a chance to run before os._exit() was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.

  • The shutting down of managers has been cleaned up.

  • In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed.

  • In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed.

  • Previously the arguments to processing.Semaphore() and processing.BoundedSemaphore() did not have any defaults. The defaults should be 1 to match threading. Fixed.

  • It should now be possible for a Windows Service created by using pywin32 to spawn processes using the processing package.

    Note that pywin32 apparently has a bug meaning that Py_Finalize() is never called when the service exits so functions registered with atexit never get a chance to run. Therefore it is advisable to explicitly call sys.exitfunc() or atexit._run_exitfuncs() at the end of ServiceFramework.DoSvcRun(). Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report.

  • Added getLogger() and enableLogging() to support logging.

Changes in 0.35

  • By default processes are no longer stoppable using the stop() method: one must call setStoppable(True) before start() in order to use the stop() method. (Note that terminate() will work regardless of whether the process is marked as being "stoppable".)

    The reason for this is that on Windows getting stop() to work involves starting a new console for the child process and installing a signal handler for the SIGBREAK signal. This unfortunately means that Ctrl-Break cannot be used to kill all processes of the program.

  • Added setStoppable() and getStoppable() methods -- see above.

  • Added BufferedQueue/BufferedPipeQueue/BufferedPosixQueue. Putting an object on a buffered queue will always succeed without blocking (just like with Queue.Queue if no maximum size is specified). This makes them potentially safer than the normal queue types provided by processing which have finite capacity and may cause deadlocks if they fill.

    test/test_worker.py has been updated to use BufferedQueue for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock.

  • Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of sem_timedwait(). This means that Condition.wait() and Queue.get() should now work with timeouts on Mac OS X.

  • Added a callback argument to Pool.apply_async().

  • Added test/test_httpserverpool.py which runs a pool of http servers which share a single listening socket.

  • Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket.

  • Fixed bug in the iterator returned by Pool.imap().

  • Fixed bug in Condition.__repr__().

  • Fixed a handle/file descriptor leak when sockets or connections are unpickled.

Changes in 0.34

  • Although in version 0.33 the C extension would compile on Mac OSX, trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the ImportError exception was silently swallowed.

    This is now fixed by using the NO_SEM_TIMED macro. Unfortunately this means that some methods like Condition.wait() and Queue.get() will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing.

  • Added a terminate() method to process objects which is more forceful than stop().

  • Fixed bug in the cleanup function registered with atexit which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. Thanks to Dominique Wahli for report and testing.

  • Added test/test_workers.py which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another.

  • Added processing.Pool() which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the map() builtin. This is still experimental and undocumented --- see test/test_pool.py for example usage.

Changes in 0.33

  • Added a recvbytes_into() method for receiving byte data into objects with the writable buffer interface. Also renamed the _recv_string() and _send_string() methods of connection objects to recvbytes() and sendbytes().

  • Some optimizations for the transferring of large blocks of data using connection objects.

  • On Unix os.sysconf() is now used by default to determine whether to compile in support for posix semaphores or posix message queues.

    By using the NO_SEM_TIMED and NO_MQ_TIMED macros (see INSTALL.txt) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions sem_timedwait() or mq_timedreceive() and mq_timedsend().

  • gettimeofday() is now used instead of clock_gettime(), making compilation of the C extension (hopefully) possible on Mac OSX. No modification of setup.py should be necessary. Thanks to Michele Bertoldi for report and proposed patch.

  • cpuCount() function added which returns the number of CPUs in the system.

  • Bugfixes to PosixQueue class.

Changes in 0.32

  • Refactored and simplified _nonforking module -- info about sys.modules of parent process is no longer passed on to child process. Also pkgutil is no longer used.
  • Allocated space from an mmap used by LocalManager will now be recycled.
  • Better tests for LocalManager.
  • Fixed bug in managers.py concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report.
  • Added a freezeSupport() function. Calling this at the appropriate point in the main module is necessary when freezing a multiprocess program to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

Changes in 0.31

  • Fixed one line bug in localmanager.py which caused shared memory maps not to be resized properly.
  • Added tests for shared values/structs/arrays to test/test_processing.

Changes in 0.30

  • Process objects now support the complete API of thread objects.

    In particular isAlive(), isDaemon(), setDaemon() have been added and join() now supports the timeout parameter.

    There are also new methods stop(), getPid() and getExitCode().

  • Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores.

  • Added support for sharing simple objects between processes by using a shared memory map and the struct or array modules.

  • An activeChildren() function has been added to processing which returns a list of the child processes which are still alive.

  • A Pipe() function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent.

  • socket objects etc are now picklable and can be transferred between processes. (Requires compilation of the _processing extension.)

  • Subclasses of managers.BaseManager no longer automatically spawn a child process when an instance is created: the start() method must be called explicitly.

  • On Windows child processes are now spawned using subprocess.

  • On Windows the Python 2.5 version of pkgutil is now used for loading modules by the _nonforking module. On Python 2.4 this version of pkgutil (which uses the standard Python licence) is included in processing.compat.

  • The arguments to the functions in processing.connection have changed slightly.

  • Connection objects now have a poll() method which tests whether there is any data available for reading.

  • The test/py2exedemo folder shows how to get py2exe to create a Windows executable from a program using the processing package.

  • More tests.

  • Bugfixes.

  • Rearrangement of various stuff.

Changes in 0.21

  • By default a proxy is now only able to access those methods of its referent which have been explicitly exposed.
  • The connection sub-package now supports digest authentication.
  • Process objects are now given randomly generated 'inheritable' authentication keys.
  • A manager process will now only accept connections from processes using the same authentication key.
  • Previously get_module() from _nonforking.py was seriously messed up (though it generally worked). It is a lot saner now.
  • Python 2.4 or higher is now required.

Changes in 0.20

  • The doc folder contains HTML documentation.
  • test is now a subpackage. Running processing.test.main() will run test scripts using both processes and threads.
  • nonforking.py has been renamed _nonforking.py. manager.py has been renamed managers.py. connection.py has become a sub-package connection.
  • Listener and Client have been removed from processing, but still exist in processing.connection.
  • The package is now probably compatible with versions of Python earlier than 2.4.
  • set is no longer a type supported by the default manager type.
  • Many more changes.

Changes in 0.12

  • Fixed bug where the arguments to processing.Manager() were passed on to processing.manager.DefaultManager() in the wrong order.
  • processing.dummy is now a subpackage of processing instead of a module.
  • Rearranged package so that the test folder, README.txt and CHANGES.txt are copied when the package is installed.

Changes in 0.11

  • Fixed bug on windows when the full path of nonforking.py contains a space.
  • On unix there is no longer a need to make the arguments to the constructor of Process be picklable or for an instance of a subclass of Process to be picklable when you call the start method.
  • On unix proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to Process. (This will never be possible on windows.)
uqfoundation-multiprocess-b3457a5/py3.11/doc/COPYING.html000066400000000000000000000040211455552142400230130ustar00rootroot00000000000000

Copyright (c) 2006-2008, R Oudkerk

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

uqfoundation-multiprocess-b3457a5/py3.11/doc/INSTALL.html000066400000000000000000000063531455552142400230230ustar00rootroot00000000000000 Installation of processing

Installation of processing

Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the ctypes package (which comes automatically with Python 2.5).

Windows binary builds for Python 2.4 and Python 2.5 are available at

http://pyprocessing.berlios.de

or

http://pypi.python.org/pypi/processing

Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:

python setup.py install

It should not be necessary to do any editing of setup.py if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the macros dictionary or libraries list. The section to modify reads

else:
    macros = dict(
        HAVE_SEM_OPEN=1,
        HAVE_SEM_TIMEDWAIT=1,
        HAVE_FD_TRANSFER=1
        )
    libraries = ['rt']

More details can be found in the comments in setup.py.

Note that if you use HAVE_SEM_OPEN=0 then support for posix semaphores will not be compiled in, and then many of the functions in the processing namespace like Lock() or Queue() will not be available. However, one can still create a manager using manager = processing.Manager() and then do lock = manager.Lock() etc.
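
For example, a rough sketch of this manager-based fallback (the worker function and the message text are purely illustrative, and it assumes the manager also provides Queue() in the same way as Lock()):

from processing import Manager, Process

def worker(lock, queue):
    lock.acquire()
    try:
        queue.put('hello from a worker process')
    finally:
        lock.release()

if __name__ == '__main__':
    manager = Manager()        # starts a separate manager process
    lock = manager.Lock()      # proxies for objects held by the manager
    queue = manager.Queue()
    p = Process(target=worker, args=(lock, queue))
    p.start()
    p.join()
    print queue.get()          # => 'hello from a worker process'

The proxies behave like the ordinary Lock and Queue objects, but every operation is forwarded to the manager process, so they still work when the compiled semaphore support is missing (at the cost of some extra overhead).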

Running tests

To run the test scripts using Python 2.5 do

python -m processing.tests

and on Python 2.4 do

python -c "from processing.tests import main; main()"

This will run a number of test scripts using both processes and threads.

uqfoundation-multiprocess-b3457a5/py3.11/doc/THANKS.html000066400000000000000000000017751455552142400227100ustar00rootroot00000000000000 Thanks

Thanks

Thanks to everyone who has offered bug reports, patches, suggestions:

Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.

Sorry if I have forgotten anyone.

uqfoundation-multiprocess-b3457a5/py3.11/doc/__init__.py000066400000000000000000000004001455552142400231230ustar00rootroot00000000000000import os import webbrowser def main(): ''' Show html documentation using webbrowser ''' index_html = os.path.join(os.path.dirname(__file__), 'index.html') webbrowser.open(index_html) if __name__ == '__main__': main() uqfoundation-multiprocess-b3457a5/py3.11/doc/connection-objects.html000066400000000000000000000152041455552142400254760ustar00rootroot00000000000000 Connection objects
Prev         Up         Next

Connection objects

Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.

Connection objects are usually created using processing.Pipe() -- see also Listener and Clients.

Connection objects have the following methods:

send(obj)

Send an object to the other end of the connection which should be read using recv().

The object must be picklable.

recv()
Return an object sent from the other end of the connection using send(). Raises EOFError if there is nothing left to receive and the other end was closed.
fileno()
Returns the file descriptor or handle used by the connection.
close()

Close the connection.

This is called automatically when the connection is garbage collected.

poll(timeout=0.0)

Return whether there is any data available to be read within timeout seconds.

If timeout is None then an infinite timeout is used.

Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C.

sendBytes(buffer)

Send byte data from an object supporting the buffer interface as a complete message.

Can be used to send strings or a view returned by buffer().

recvBytes()
Return a complete message of byte data sent from the other end of the connection as a string. Raises EOFError if there is nothing left to receive and the other end was closed.
recvBytesInto(buffer, offset=0)

Read into buffer at position offset a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises EOFError if there is nothing left to receive and the other end was closed.

buffer must be an object satisfying the writable buffer interface and offset must be non-negative and less than the length of buffer (in bytes).

If the buffer is too short then a BufferTooShort exception is raised and the complete message is available as e.args[0] where e is the exception instance (a sketch of handling this case follows the example below).

For example:

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.sendBytes('thank you')
>>> a.recvBytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.sendBytes(arr1)
>>> count = b.recvBytesInto(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
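
If the receiving buffer is too small, the complete message can still be recovered from the BufferTooShort exception described above -- a minimal sketch (buffer and message sizes are illustrative; BufferTooShort is imported from processing.connection, as documented in Listeners and Clients):

from array import array
from processing import Pipe
from processing.connection import BufferTooShort

a, b = Pipe()
a.sendBytes(array('i', range(100)))

small = array('i', [0] * 10)         # deliberately too small for the message
try:
    b.recvBytesInto(small)
except BufferTooShort, e:
    data = e.args[0]                 # the complete message as a byte string
    print len(data)                  # => 400 on a platform with 4-byte ints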

Warning

The recv() method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message.

Therefore, unless the connection object was produced using Pipe() you should only use the recv() and send() methods after performing some sort of authentication. See Authentication keys.

Warning

If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie.

uqfoundation-multiprocess-b3457a5/py3.11/doc/connection-objects.txt000066400000000000000000000072761455552142400253630ustar00rootroot00000000000000.. include:: header.txt ==================== Connection objects ==================== Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects usually created using `processing.Pipe()` -- see also `Listener and Clients `_. Connection objects have the following methods: `send(obj)` Send an object to the other end of the connection which should be read using `recv()`. The object must be picklable. `recv()` Return an object sent from the other end of the connection using `send()`. Raises `EOFError` if there is nothing left to receive and the other end was closed. `fileno()` Returns the file descriptor or handle used by the connection. `close()` Close the connection. This is called automatically when the connection is garbage collected. `poll(timeout=0.0)` Return whether there is any data available to be read within `timeout` seconds. If `timeout` is `None` then an infinite timeout is used. Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C. `sendBytes(buffer)` Send byte data from an object supporting the buffer interface as a complete message. Can be used to send strings or a view returned by `buffer()`. `recvBytes()` Return a complete message of byte data sent from the other end of the connection as a string. Raises `EOFError` if there is nothing left to receive and the other end was closed. `recvBytesInto(buffer, offset=0)` Read into `buffer` at position `offset` a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises `EOFError` if there is nothing left to receive and the other end was closed. `buffer` must be an object satisfying the writable buffer interface and `offset` must be non-negative and less than the length of `buffer` (in bytes). If the buffer is too short then a `BufferTooShort` exception is raised and the complete message is available as `e.args[0]` where `e` is the exception instance. For example: >>> from processing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.sendBytes('thank you') >>> a.recvBytes() 'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.sendBytes(arr1) >>> count = b.recvBytesInto(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The `recv()` method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using `Pipe()` you should only use the `recv()` and `send()` methods after performing some sort of authentication. See `Authentication keys `_. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie. .. _Prev: queue-objects.html .. _Up: processing-ref.html .. _Next: manager-objects.html uqfoundation-multiprocess-b3457a5/py3.11/doc/connection-ref.html000066400000000000000000000357371455552142400246360ustar00rootroot00000000000000 Listeners and Clients
Prev         Up         Next

Listeners and Clients

Usually message passing between processes is done using queues or by using connection objects returned by Pipe().

However, the processing.connection module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for digest authentication using the hmac module from the standard library.

Classes and functions

The module defines the following functions:

Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)
Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections.
Client(address, family=None, authenticate=False, authkey=None)

Attempts to set up a connection to the listener which is using address address, returning a connection object.

The type of the connection is determined by the family argument, but this can generally be omitted since it can usually be inferred from the format of address.

If authenticate is true or authkey is a string then digest authentication is used. The key used for authentication will be either authkey or currentProcess().getAuthKey() if authkey is None. If authentication fails then AuthenticationError is raised. See Authentication keys.

The module exports two exception types:

exception AuthenticationError
Exception raised when there is an authentication error.
exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Listener objects

Instances of Listener have the following methods:

__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)
address
The address to be used by the bound socket or named pipe of the listener object.
family

The type of the socket (or named pipe) to use.

This can be one of the strings 'AF_INET' (for a TCP socket), 'AF_UNIX' (for a Unix domain socket) or 'AF_PIPE' (for a Windows named pipe). Of these only the first is guaranteed to be available.

If family is None then the family is inferred from the format of address. If address is also None then a default is chosen. This default is the family which is assumed to be the fastest available. See Address formats.

Note that if family is 'AF_UNIX' then the associated file will only be readable/writable by the user running the current process -- use os.chmod() if you need to let other users access the socket.

backlog
If the listener object uses a socket then backlog is passed to the listen() method of the socket once it has been bound.
authenticate
If authenticate is true or authkey is not None then digest authentication is used.
authkey

If authkey is a string then it will be used as the authentication key; otherwise it must be None.

If authkey is None and authenticate is true then currentProcess.getAuthKey() is used as the authentication key.

If authkey is None and authenticate is false then no authentication is done.

If authentication fails then AuthenticationError is raised. See Authentication keys.

accept()

Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then AuthenticationError is raised.

Returns a connection object.

close()

Close the bound socket or named pipe of the listener object.

This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.

Listener objects have the following read-only properties:

address
The address which is being used by the listener object.
last_accepted

The address from which the last accepted connection came.

If this is unavailable then None is returned.

Address formats

  • An 'AF_INET' address is a tuple of the form (hostname, port) where hostname is a string and port is an integer

  • An 'AF_UNIX' address is a string representing a filename on the filesystem.

  • An 'AF_PIPE' address is a string of the form r'\\.\pipe\PipeName'.

    To use Client to connect to a named pipe on a remote computer called ServerName one should use an address of the form r'\\ServerName\pipe\PipeName' instead.

Note that any string beginning with two backslashes is assumed by default to be an 'AF_PIPE' address rather than an 'AF_UNIX' address.
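
As a sketch of how the family is inferred from the address (the socket path and pipe name below are made-up examples, and each variant only works on the appropriate platform):

from processing.connection import Listener

inet_listener = Listener(('localhost', 6000))        # inferred as 'AF_INET'
unix_listener = Listener('/tmp/example_socket')      # inferred as 'AF_UNIX' (Unix only)
pipe_listener = Listener(r'\\.\pipe\ExamplePipe')    # inferred as 'AF_PIPE' (Windows only)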

Authentication keys

When one uses the recv() method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore Listener and Client use the hmac module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does not involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the return value of currentProcess().getAuthKey() is used (see Process objects). This value will automatically be inherited by any Process object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves.

Suitable authentication keys can also be generated by using os.urandom().
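
For instance, a short sketch of generating a key with os.urandom() and handing the same key to both ends (the address and the key length are arbitrary choices):

import os
from processing.connection import Listener, Client

authkey = os.urandom(20)                    # 20 random bytes used as the shared secret

listener = Listener(('localhost', 6000), authkey=authkey)
# ... and in another process which has been given the same key:
# conn = Client(('localhost', 6000), authkey=authkey)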

Example

The following server code creates a listener which uses 'secret password' as an authentication key. It then waits for a connection and sends some data to the client:

from processing.connection import Listener
from array import array

address = ('localhost', 6000)     # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')

conn = listener.accept()
print 'connection accepted from', listener.last_accepted

conn.send([2.25, None, 'junk', float])

conn.sendBytes('hello')

conn.sendBytes(array('i', [42, 1729]))

conn.close()
listener.close()

The following code connects to the server and receives some data from the server:

from processing.connection import Client
from array import array

address = ('localhost', 6000)
conn = Client(address, authkey='secret password')

print conn.recv()                 # => [2.25, None, 'junk', float]

print conn.recvBytes()            # => 'hello'

arr = array('i', [0, 0, 0, 0, 0])
print conn.recvBytesInto(arr)    # => 8
print arr                         # => array('i', [42, 1729, 0, 0, 0])

conn.close()
uqfoundation-multiprocess-b3457a5/py3.11/doc/connection-ref.txt000066400000000000000000000210001455552142400244630ustar00rootroot00000000000000.. include:: header.txt ======================= Listeners and Clients ======================= Usually message passing between processes is done using queues or by using connection objects returned by `Pipe()`. However, the `processing.connection` module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for *digest authentication* using the `hmac` module from the standard library. Classes and functions ===================== The module defines the following functions: `Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)` Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections. `Client(address, family=None, authenticate=False, authkey=None)` Attempts to set up a connection to the listener which is using address `address`, returning a `connection object `_. The type of the connection is determined by `family` argument, but this can generally be omitted since it can usually be inferred from the format of `address`. If `authentication` or `authkey` is a string then digest authentication is used. The key used for authentication will be either `authkey` or `currentProcess.getAuthKey()` if `authkey` is `None`. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. .. `deliverChallenge(connection, authkey)` Sends a randomly generated message to the other end of the connection and waits for a reply. If the reply matches the digest of the message using `authkey` as the key then a welcome message is sent to the other end of the connection. Otherwise `AuthenticationError` is raised. `answerChallenge(connection, authkey)` Receives a message, calculates the digest of the message using `authkey` as the key, and then sends the digest back. If a welcome message is not received then `AuthenticationError` is raised. The module exports two exception types: **exception** `AuthenticationError` Exception raised when there is an authentication error. **exception** `BufferTooShort` Exception raise by the `recvBytesInto()` method of a connection object when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Listener objects ================ Instances of `Listener` have the following methods: `__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)` `address` The address to be used by the bound socket or named pipe of the listener object. `family` The type of the socket (or named pipe) to use. This can be one of the strings `'AF_INET'` (for a TCP socket), `'AF_UNIX'` (for a Unix domain socket) or `'AF_PIPE'` (for a Windows named pipe). Of these only the first is guaranteed to be available. If `family` is `None` than the family is inferred from the format of `address`. If `address` is also `None` then a default is chosen. This default is the family which is assumed to be the fastest available. See `Address formats`_. Note that if `family` is `'AF_UNIX'` then the associated file will have only be readable/writable by the user running the current process -- use `os.chmod()` is you need to let other users access the socket. `backlog` If the listener object uses a socket then `backlog` is passed to the `listen()` method of the socket once it has been bound. 
`authenticate` If `authenticate` is true or `authkey` is not `None` then digest authentication is used. `authkey` If `authkey` is a string then it will be used as the authentication key; otherwise it must be `None`. If `authkey` is `None` and `authenticate` is true then `currentProcess.getAuthKey()` is used as the authentication key. If `authkey` is `None` and `authentication` is false then no authentication is done. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. `accept()` Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then `AuthenticationError` is raised. Returns a `connection object ` object. `close()` Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: `address` The address which is being used by the listener object. `last_accepted` The address from which the last accepted connection came. If this is unavailable then `None` is returned. Address formats =============== * An `'AF_INET'` address is a tuple of the form `(hostname, port)` where `hostname` is a string and `port` is an integer * An `'AF_UNIX'` address is a string representing a filename on the filesystem. * An `'AF_PIPE'` address is a string of the form `r'\\\\.\\pipe\\PipeName'`. To use `Client` to connect to a named pipe on a remote computer called `ServerName` one should use an address of the form `r'\\\\ServerName\\pipe\\PipeName'` instead. Note that any string beginning with two backslashes is assumed by default to be an `'AF_PIPE'` address rather than an `'AF_UNIX'` address. Authentication keys =================== When one uses the `recv()` method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore `Listener` and `Client` use the `hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does *not* involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of `currentProcess().getAuthKey()` is used (see `Process objects `_). This value will automatically inherited by any `Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between the themselves. Suitable authentication keys can also be generated by using `os.urandom()`. Example ======= The following server code creates a listener which uses `'secret password'` as an authentication key. 
It then waits for a connection and sends some data to the client:: from processing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey='secret password') conn = listener.accept() print 'connection accepted from', listener.last_accepted conn.send([2.25, None, 'junk', float]) conn.sendBytes('hello') conn.sendBytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from processing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey='secret password') print conn.recv() # => [2.25, None, 'junk', float] print conn.recvBytes() # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print conn.recvBytesInto(arr) # => 8 print arr # => array('i', [42, 1729, 0, 0, 0]) conn.close() .. _Prev: sharedctypes.html .. _Up: processing-ref.html .. _Next: programming-guidelines.html uqfoundation-multiprocess-b3457a5/py3.11/doc/header.txt000066400000000000000000000003401455552142400230060ustar00rootroot00000000000000.. default-role:: literal .. header:: Prev_ |spaces| Up_ |spaces| Next_ .. footer:: Prev_ |spaces| Up_ |spaces| Next_ .. |nbsp| unicode:: U+000A0 .. |spaces| replace:: |nbsp| |nbsp| |nbsp| |nbsp| uqfoundation-multiprocess-b3457a5/py3.11/doc/html4css1.css000066400000000000000000000126361455552142400233640ustar00rootroot00000000000000/* :Author: David Goodger :Contact: goodger@users.sourceforge.net :Date: $Date: 2008/01/29 22:14:02 $ :Revision: $Revision: 1.1.1.1 $ :Copyright: This stylesheet has been placed in the public domain. Default cascading style sheet for the HTML output of Docutils. See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to customize this style sheet. */ /* used to remove borders from tables and images */ .borderless, table.borderless td, table.borderless th { border: 0 } table.borderless td, table.borderless th { /* Override padding for "table.docutils td" with "! important". The right padding separates the table cells. */ padding: 0 0.5em 0 0 ! important } .first { /* Override more specific margin styles with "! important". */ margin-top: 0 ! important } .last, .with-subtitle { margin-bottom: 0 ! important } .hidden { display: none } a.toc-backref { text-decoration: none ; color: black } blockquote.epigraph { margin: 2em 5em ; } dl.docutils dd { margin-bottom: 0.5em } /* Uncomment (and remove this text!) to get bold-faced definition list terms dl.docutils dt { font-weight: bold } */ div.abstract { margin: 2em 5em } div.abstract p.topic-title { font-weight: bold ; text-align: center } div.admonition, div.attention, div.caution, div.danger, div.error, div.hint, div.important, div.note, div.tip, div.warning { margin: 2em ; border: medium outset ; padding: 1em } div.admonition p.admonition-title, div.hint p.admonition-title, div.important p.admonition-title, div.note p.admonition-title, div.tip p.admonition-title { font-weight: bold ; font-family: sans-serif } div.attention p.admonition-title, div.caution p.admonition-title, div.danger p.admonition-title, div.error p.admonition-title, div.warning p.admonition-title { color: red ; font-weight: bold ; font-family: sans-serif } /* Uncomment (and remove this text!) to get reduced vertical space in compound paragraphs. 
div.compound .compound-first, div.compound .compound-middle { margin-bottom: 0.5em } div.compound .compound-last, div.compound .compound-middle { margin-top: 0.5em } */ div.dedication { margin: 2em 5em ; text-align: center ; font-style: italic } div.dedication p.topic-title { font-weight: bold ; font-style: normal } div.figure { margin-left: 2em ; margin-right: 2em } div.footer, div.header { clear: both; font-size: smaller } div.line-block { display: block ; margin-top: 1em ; margin-bottom: 1em } div.line-block div.line-block { margin-top: 0 ; margin-bottom: 0 ; margin-left: 1.5em } div.sidebar { margin-left: 1em ; border: medium outset ; padding: 1em ; background-color: #ffffee ; width: 40% ; float: right ; clear: right } div.sidebar p.rubric { font-family: sans-serif ; font-size: medium } div.system-messages { margin: 5em } div.system-messages h1 { color: red } div.system-message { border: medium outset ; padding: 1em } div.system-message p.system-message-title { color: red ; font-weight: bold } div.topic { margin: 2em } h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { margin-top: 0.4em } h1.title { text-align: center } h2.subtitle { text-align: center } hr.docutils { width: 75% } img.align-left { clear: left } img.align-right { clear: right } ol.simple, ul.simple { margin-bottom: 1em } ol.arabic { list-style: decimal } ol.loweralpha { list-style: lower-alpha } ol.upperalpha { list-style: upper-alpha } ol.lowerroman { list-style: lower-roman } ol.upperroman { list-style: upper-roman } p.attribution { text-align: right ; margin-left: 50% } p.caption { font-style: italic } p.credits { font-style: italic ; font-size: smaller } p.label { white-space: nowrap } p.rubric { font-weight: bold ; font-size: larger ; color: maroon ; text-align: center } p.sidebar-title { font-family: sans-serif ; font-weight: bold ; font-size: larger } p.sidebar-subtitle { font-family: sans-serif ; font-weight: bold } p.topic-title { font-weight: bold } pre.address { margin-bottom: 0 ; margin-top: 0 ; font-family: serif ; font-size: 100% } pre.literal-block, pre.doctest-block { margin-left: 2em ; margin-right: 2em ; background-color: #eeeeee } span.classifier { font-family: sans-serif ; font-style: oblique } span.classifier-delimiter { font-family: sans-serif ; font-weight: bold } span.interpreted { font-family: sans-serif } span.option { white-space: nowrap } span.pre { white-space: pre } span.problematic { color: red } span.section-subtitle { /* font-size relative to parent (h1..h6 element) */ font-size: 80% } table.citation { border-left: solid 1px gray; margin-left: 1px } table.docinfo { margin: 2em 4em } table.docutils { margin-top: 0.5em ; margin-bottom: 0.5em } table.footnote { border-left: solid 1px black; margin-left: 1px } table.docutils td, table.docutils th, table.docinfo td, table.docinfo th { padding-left: 0.5em ; padding-right: 0.5em ; vertical-align: top } table.docutils th.field-name, table.docinfo th.docinfo-name { font-weight: bold ; text-align: left ; white-space: nowrap ; padding-left: 0 } h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { font-size: 100% } /* tt.docutils { background-color: #eeeeee } */ ul.auto-toc { list-style-type: none } uqfoundation-multiprocess-b3457a5/py3.11/doc/index.html000066400000000000000000000064761455552142400230320ustar00rootroot00000000000000 Documentation for processing-0.52
Prev         Up         Next
uqfoundation-multiprocess-b3457a5/py3.11/doc/index.txt000066400000000000000000000021751455552142400226750ustar00rootroot00000000000000.. include:: header.txt .. include:: version.txt ======================================== Documentation for processing-|version| ======================================== :Author: R Oudkerk :Contact: roudkerk at users.berlios.de :Url: http://developer.berlios.de/projects/pyprocessing :Licence: BSD Licence Contents ======== * `Introduction `_ * `Package reference `_ + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes objects `_ + `Listeners and Clients `_ * `Programming guidelines `_ * `Tests and examples `_ See also ======== * `Installation instructions `_ * `Changelog `_ * `Acknowledgments `_ * `Licence `_ .. _Next: intro.html .. _Up: index.html .. _Prev: index.html uqfoundation-multiprocess-b3457a5/py3.11/doc/intro.html000066400000000000000000000427461455552142400230560ustar00rootroot00000000000000 Introduction
Prev         Up         Next

Introduction

Threads, processes and the GIL

To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads.

Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient.

On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other.

CPython has a Global Interpreter Lock (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C.

One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead.

Python gives little direct support for writing programs using multiple processes. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs.

Forking and spawning

There are two ways of creating a new process in Python:

  • The current process can fork a new child process by using the os.fork() function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits copies of all variables that the parent process had.

    However, os.fork() is not available on every platform: in particular Windows does not support it.

  • Alternatively, the current process can spawn a completely new Python interpreter by using the subprocess module or one of the os.spawn*() functions.

    Getting this new interpreter into a fit state to perform the task set for it by its parent process is, however, a bit of a challenge.

The processing package uses os.fork() if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process.
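
As a minimal illustration of the fork model described above (plain os.fork(), not part of the processing API; POSIX only):

import os

pid = os.fork()                  # returns 0 in the child, the child's pid in the parent
if pid == 0:
    print 'child: sees a copy of the parent\'s variables'
    os._exit(0)                  # leave the child without running parent cleanup code
else:
    os.waitpid(pid, 0)           # parent waits for the child to finish
    print 'parent: child', pid, 'has exited'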

The Process class

In the processing package processes are spawned by creating a Process object and then calling its start() method. processing.Process follows the API of threading.Thread. A trivial example of a multiprocess program is

from processing import Process

def f(name):
    print 'hello', name

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

Here the function f is run in a child process.

For an explanation of why (on Windows) the if __name__ == '__main__' part is necessary see Programming guidelines.

Exchanging objects between processes

processing supports two types of communication channel between processes:

Queues:

The function Queue() returns a near clone of Queue.Queue -- see the Python standard documentation. For example

from processing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()

Queues are thread and process safe. See Queues.

Pipes:

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example

from processing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print parent_conn.recv()   # prints "[42, None, 'hello']"
    p.join()

The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See Pipes.

Synchronization between processes

processing contains equivalents of all the synchronization primitives from threading. For instance one can use a lock to ensure that only one process prints to standard output at a time:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()

    for num in range(10):
        Process(target=f, args=(lock, num)).start()

Without using the lock output from the different processes is liable to get all mixed up.

Sharing state between processes

As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes.

However, if you really do need to use some shared data then processing provides a couple of ways of doing so.

Shared memory:

Data can be stored in a shared memory map using Value or Array. For example the following code

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

will print

3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

The 'd' and 'i' arguments used when creating num and arr are typecodes of the kind used by the array module: 'd' indicates a double precision float and 'i' indicates a signed integer. These shared objects will be process and thread safe.

For more flexibility in using shared memory one can use the processing.sharedctypes module which supports the creation of arbitrary ctypes objects allocated from shared memory.
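
For instance, a brief sketch (the Point structure and the move() function are made-up names, and the Value constructor is assumed to accept a ctypes type in place of a typecode):

from processing import Process
from processing.sharedctypes import Value
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def move(p):
    p.x += 1.0                            # field access goes through the shared structure
    p.y -= 1.0

if __name__ == '__main__':
    point = Value(Point, 0.0, 0.0)        # a ctypes structure allocated from shared memory
    proc = Process(target=move, args=(point,))
    proc.start()
    proc.join()
    print point.x, point.y                # prints "1.0 -1.0"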

Server process:

A manager object returned by Manager() controls a server process which holds python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue, Value and Array. For example:

from processing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    print d
    print l

will print

{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Creating managers which support other types is not hard --- see Customized managers.

Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See Server process managers.

Using a pool of workers

The Pool() function returns an object representing a pool of worker processes. It has methods which allow tasks to be offloaded to the worker processes in a few different ways.

For example:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes
    result = pool.applyAsync(f, [10])     # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow
    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

See Process pools.

Speed

The following benchmarks were performed on a single-core Pentium 4, 2.5GHz laptop running Windows XP and Ubuntu Linux 6.10 --- see benchmarks.py.

Number of 256 byte string objects passed between processes/threads per sec:

Connection type Windows Linux
Queue.Queue 49,000 17,000-50,000 [1]
processing.Queue 22,000 21,000
Queue managed by server 6,900 6,500
processing.Pipe 52,000 57,000
[1] For some reason the performance of Queue.Queue is very variable on Linux.

Number of acquires/releases of a lock per sec:

Lock type Windows Linux
threading.Lock 850,000 560,000
processing.Lock 420,000 510,000
Lock managed by server 10,000 8,400
threading.RLock 93,000 76,000
processing.RLock 420,000 500,000
RLock managed by server 8,800 7,400

Number of interleaved waits/notifies per sec on a condition variable by two processes:

Condition type Windows Linux
threading.Condition 27,000 31,000
processing.Condition 26,000 25,000
Condition managed by server 6,600 6,000

Number of integers retrieved from a sequence per sec:

Sequence type Windows Linux
list 6,400,000 5,100,000
unsynchronized shared array 3,900,000 3,100,000
synchronized shared array 200,000 220,000
list managed by server 20,000 17,000
uqfoundation-multiprocess-b3457a5/py3.11/doc/intro.txt000066400000000000000000000301551455552142400227200ustar00rootroot00000000000000.. include:: header.txt ============== Introduction ============== Threads, processes and the GIL ============================== To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads. Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient. On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other. CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C. One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead. Python gives little direct support for writing programs using multiple process. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs. Forking and spawning ==================== There are two ways of creating a new process in Python: * The current process can *fork* a new child process by using the `os.fork()` function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits *copies* of all variables that the parent process had. However, `os.fork()` is not available on every platform: in particular Windows does not support it. * Alternatively, the current process can spawn a completely new Python interpreter by using the `subprocess` module or one of the `os.spawn*()` functions. Getting this new interpreter in to a fit state to perform the task set for it by its parent process is, however, a bit of a challenge. The `processing` package uses `os.fork()` if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process. The Process class ================= In the `processing` package processes are spawned by creating a `Process` object and then calling its `start()` method. `processing.Process` follows the API of `threading.Thread`. A trivial example of a multiprocess program is :: from processing import Process def f(name): print 'hello', name if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() Here the function `f` is run in a child process. For an explanation of why (on Windows) the `if __name__ == '__main__'` part is necessary see `Programming guidelines `_. 
Exchanging objects between processes ==================================== `processing` supports two types of communication channel between processes: **Queues**: The function `Queue()` returns a near clone of `Queue.Queue` -- see the Python standard documentation. For example :: from processing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print q.get() # prints "[42, None, 'hello']" p.join() Queues are thread and process safe. See `Queues `_. **Pipes**: The `Pipe()` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example :: from processing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print parent_conn.recv() # prints "[42, None, 'hello']" p.join() The two connection objects returned by `Pipe()` represent the two ends of the pipe. Each connection object has `send()` and `recv()` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See `Pipes `_. Synchronization between processes ================================= `processing` contains equivalents of all the synchronization primitives from `threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from processing import Process, Lock def f(l, i): l.acquire() print 'hello world', i l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes =============================== As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then `processing` provides a couple of ways of doing so. **Shared memory**: Data can be stored in a shared memory map using `Value` or `Array`. For example the following code :: from processing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print num.value print arr[:] will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The `'d'` and `'i'` arguments used when creating `num` and `arr` are typecodes of the kind used by the `array` module: `'d'` indicates a double precision float and `'i'` inidicates a signed integer. These shared objects will be process and thread safe. For more flexibility in using shared memory one can use the `processing.sharedctypes` module which supports the creation of arbitrary `ctypes objects allocated from shared memory `_. **Server process**: A manager object returned by `Manager()` controls a server process which holds python objects and allows other processes to manipulate them using proxies. A manager returned by `Manager()` will support types `list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value` and `Array`. 
For example:: from processing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print d print l will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Creating managers which support other types is not hard --- see `Customized managers `_. Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See `Server process managers `_. Using a pool of workers ======================= The `Pool()` function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" See `Process pools `_. Speed ===== The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see `benchmarks.py <../examples/benchmarks.py>`_. *Number of 256 byte string objects passed between processes/threads per sec*: ================================== ========== ================== Connection type Windows Linux ================================== ========== ================== Queue.Queue 49,000 17,000-50,000 [1]_ processing.Queue 22,000 21,000 Queue managed by server 6,900 6,500 processing.Pipe 52,000 57,000 ================================== ========== ================== .. [1] For some reason the performance of `Queue.Queue` is very variable on Linux. *Number of acquires/releases of a lock per sec*: ============================== ========== ========== Lock type Windows Linux ============================== ========== ========== threading.Lock 850,000 560,000 processing.Lock 420,000 510,000 Lock managed by server 10,000 8,400 threading.RLock 93,000 76,000 processing.RLock 420,000 500,000 RLock managed by server 8,800 7,400 ============================== ========== ========== *Number of interleaved waits/notifies per sec on a condition variable by two processes*: ============================== ========== ========== Condition type Windows Linux ============================== ========== ========== threading.Condition 27,000 31,000 processing.Condition 26,000 25,000 Condition managed by server 6,600 6,000 ============================== ========== ========== *Number of integers retrieved from a sequence per sec*: ============================== ========== ========== Sequence type Windows Linux ============================== ========== ========== list 6,400,000 5,100,000 unsynchornized shared array 3,900,000 3,100,000 synchronized shared array 200,000 220,000 list managed by server 20,000 17,000 ============================== ========== ========== .. _Prev: index.html .. _Up: index.html .. _Next: processing-ref.html uqfoundation-multiprocess-b3457a5/py3.11/doc/manager-objects.html000066400000000000000000000440461455552142400247570ustar00rootroot00000000000000 Manager objects
Prev         Up         Next

Manager objects

A manager object controls a server process which manages shared objects. Other processes can access the shared objects by using proxies.

Manager processes will be shut down as soon as they are garbage collected or their parent process exits. The manager classes are defined in the processing.managers module.

BaseManager

BaseManager is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects.

The public methods of BaseManager are the following:

__init__(self, address=None, authkey=None)

Creates a manager object.

Once created one should call start() or serveForever() to ensure that the manager object refers to a started manager process.

The arguments to the constructor are as follows:

address

The address on which the manager process listens for new connections. If address is None then an arbitrary one is chosen.

See Listener objects.

authkey

The authentication key which will be used to check the validity of incoming connections to the server process.

If authkey is None then currentProcess().getAuthKey() is used. Otherwise authkey is used and it must be a string.

See Authentication keys.

start()
Spawn or fork a subprocess to start the manager.
serveForever()
Start the manager in the current process. See Using a remote manager.
fromAddress(address, authkey)
A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See Using a remote manager.
shutdown()

Stop the process used by the manager. This is only available if start() has been used to start the server process.

This can be called multiple times.

BaseManager instances also have one read-only property:

address
The address used by the manager.

The creation of managers which support arbitrary types is discussed below in Customized managers.

SyncManager

SyncManager is a subclass of BaseManager which can be used for the synchronization of processes. Objects of this type are returned by processing.Manager().

It also supports the creation of shared lists and dictionaries. The instance methods defined by SyncManager are listed below; a short combined example follows the list.

BoundedSemaphore(value=1)
Creates a shared threading.BoundedSemaphore object and returns a proxy for it.
Condition(lock=None)

Creates a shared threading.Condition object and returns a proxy for it.

If lock is supplied then it should be a proxy for a threading.Lock or threading.RLock object.

Event()
Creates a shared threading.Event object and returns a proxy for it.
Lock()
Creates a shared threading.Lock object and returns a proxy for it.
Namespace()

Creates a shared Namespace object and returns a proxy for it.

See Namespace objects.

Queue(maxsize=0)
Creates a shared Queue.Queue object and returns a proxy for it.
RLock()
Creates a shared threading.RLock object and returns a proxy for it.
Semaphore(value=1)
Creates a shared threading.Semaphore object and returns a proxy for it.
Array(typecode, sequence)
Creates an array and returns a proxy for it. (format is ignored.)
Value(typecode, value)
Creates an object with a writable value attribute and returns a proxy for it.
dict(), dict(mapping), dict(sequence)
Creates a shared dict object and returns a proxy for it.
list(), list(sequence)
Creates a shared list object and returns a proxy for it.
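
A minimal sketch using several of these methods together (the worker function and the names used are illustrative):

from processing import Process, Manager

def worker(q, d, lock):
    lock.acquire()
    d['status'] = 'updated by child'
    lock.release()
    q.put(42)

if __name__ == '__main__':
    manager = Manager()        # starts a server process hosting the shared objects
    q = manager.Queue()
    d = manager.dict()
    lock = manager.Lock()

    p = Process(target=worker, args=(q, d, lock))
    p.start()
    print q.get()              # prints "42"
    p.join()
    print d                    # prints "{'status': 'updated by child'}"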

Namespace objects

A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes.

However, when using a proxy for a namespace object, an attribute beginning with '_' will be an attribute of the proxy and not an attribute of the referent:

>>> manager = processing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3    # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')

Customized managers

To create one's own manager one creates a subclass of BaseManager.

To create a method of the subclass which will create new shared objects one uses the following function:

CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)

Returns a function with signature func(self, *args, **kwds) which will create a shared object using the manager self and return a proxy for it.

The shared objects will be created by evaluating callable(*args, **kwds) in the manager process.

The arguments are:

callable
The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored.
proxytype

The type of proxy which will be used for object returned by callable.

If proxytype is None then each time an object is returned by callable either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the exposed argument, see below.

exposed

Given a shared object returned by callable, the exposed argument is the list of those method names which should be exposed via BaseProxy._callMethod(). [1] [2]

If exposed is None and callable.__exposed__ exists then callable.__exposed__ is used instead.

If exposed is None and callable.__exposed__ does not exist then all methods of the shared object which do not start with '_' will be exposed.

An attempt to use BaseProxy._callMethod() with a method name which is not exposed will raise an exception.

typeid
If typeid is a string then it is used as an identifier for the callable. Otherwise, typeid must be None and a string prefixed by callable.__name__ is used as the identifier.
[1] A method here means any attribute which has a __call__ attribute.
[2]

The method names __repr__, __str__, and __cmp__ of a shared object are always exposed by the manager. However, instead of invoking the __repr__(), __str__(), __cmp__() instance methods (none of which are guaranteed to exist) they invoke the builtin functions repr(), str() and cmp().

Note that one should generally avoid exposing rich comparison methods like __eq__(), __ne__(), __le__(). To make the proxy type support comparison by value one can just expose __cmp__() instead (even if the referent does not have such a method).

Example

from processing.managers import BaseManager, CreatorMethod

class FooClass(object):
    def bar(self):
        print 'BAR'
    def baz(self):
        print 'BAZ'

class NewManager(BaseManager):
    Foo = CreatorMethod(FooClass)

if __name__ == '__main__':
    manager = NewManager()
    manager.start()
    foo = manager.Foo()
    foo.bar()               # prints 'BAR'
    foo.baz()               # prints 'BAZ'
    manager.shutdown()

See ex_newtype.py for more examples.

Using a remote manager

It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it).

Running the following commands creates a server for a shared queue which remote clients can use:

>>> from processing.managers import BaseManager, CreatorMethod
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy')
...
>>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none')
>>> m.serveForever()

One client can access the server as follows:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.put('hello')

Another client can also use it:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.get()
'hello'
uqfoundation-multiprocess-b3457a5/py3.11/doc/manager-objects.txt000066400000000000000000000235161455552142400246310ustar00rootroot00000000000000.. include:: header.txt ================= Manager objects ================= A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the `processing.managers` module. BaseManager =========== `BaseManager` is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects. The public methods of `BaseManager` are the following: `__init__(self, address=None, authkey=None)` Creates a manager object. Once created one should call `start()` or `serveForever()` to ensure that the manager object refers to a started manager process. The arguments to the constructor are as follows: `address` The address on which the manager process listens for new connections. If `address` is `None` then an arbitrary one is chosen. See `Listener objects `_. `authkey` The authentication key which will be used to check the validity of incoming connections to the server process. If `authkey` is `None` then `currentProcess().getAuthKey()`. Otherwise `authkey` is used and it must be a string. See `Authentication keys `_. `start()` Spawn or fork a subprocess to start the manager. `serveForever()` Start the manager in the current process. See `Using a remote manager`_. `fromAddress(address, authkey)` A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See `Using a remote manager`_. `shutdown()` Stop the process used by the manager. This is only available if `start()` has been used to start the server process. This can be called multiple times. `BaseManager` instances also have one read-only property: `address` The address used by the manager. The creation of managers which support arbitrary types is discussed below in `Customized managers`_. SyncManager =========== `SyncManager` is a subclass of `BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by `processing.Manager()`. It also supports creation of shared lists and dictionaries. The instance methods defined by `SyncManager` are `BoundedSemaphore(value=1)` Creates a shared `threading.BoundedSemaphore` object and returns a proxy for it. `Condition(lock=None)` Creates a shared `threading.Condition` object and returns a proxy for it. If `lock` is supplied then it should be a proxy for a `threading.Lock` or `threading.RLock` object. `Event()` Creates a shared `threading.Event` object and returns a proxy for it. `Lock()` Creates a shared `threading.Lock` object and returns a proxy for it. `Namespace()` Creates a shared `Namespace` object and returns a proxy for it. See `Namespace objects`_. `Queue(maxsize=0)` Creates a shared `Queue.Queue` object and returns a proxy for it. `RLock()` Creates a shared `threading.RLock` object and returns a proxy for it. `Semaphore(value=1)` Creates a shared `threading.Semaphore` object and returns a proxy for it. `Array(typecode, sequence)` Create an array and returns a proxy for it. (`format` is ignored.) `Value(typecode, value)` Create an object with a writable `value` attribute and returns a proxy for it. 
`dict()`, `dict(mapping)`, `dict(sequence)` Creates a shared `dict` object and returns a proxy for it. `list()`, `list(sequence)` Creates a shared `list` object and returns a proxy for it. Namespace objects ----------------- A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with `'_'` will be an attribute of the proxy and not an attribute of the referent:: >>> manager = processing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print Global Namespace(x=10, y='hello') Customized managers =================== To create one's own manager one creates a subclass of `BaseManager`. To create a method of the subclass which will create new shared objects one uses the following function: `CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)` Returns a function with signature `func(self, *args, **kwds)` which will create a shared object using the manager `self` and return a proxy for it. The shared objects will be created by evaluating `callable(*args, **kwds)` in the manager process. The arguments are: `callable` The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored. `proxytype` The type of proxy which will be used for object returned by `callable`. If `proxytype` is `None` then each time an object is returned by `callable` either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the `exposed` argument, see below. `exposed` Given a shared object returned by `callable`, the `exposed` argument is the list of those method names which should be exposed via |callmethod|_. [#]_ [#]_ If `exposed` is `None` and `callable.__exposed__` exists then `callable.__exposed__` is used instead. If `exposed` is `None` and `callable.__exposed__` does not exist then all methods of the shared object which do not start with `'_'` will be exposed. An attempt to use |callmethod| with a method name which is not exposed will raise an exception. `typeid` If `typeid` is a string then it is used as an identifier for the callable. Otherwise, `typeid` must be `None` and a string prefixed by `callable.__name__` is used as the identifier. .. |callmethod| replace:: ``BaseProxy._callMethod()`` .. _callmethod: proxy-objects.html#methods-of-baseproxy .. [#] A method here means any attribute which has a `__call__` attribute. .. [#] The method names `__repr__`, `__str__`, and `__cmp__` of a shared object are always exposed by the manager. However, instead of invoking the `__repr__()`, `__str__()`, `__cmp__()` instance methods (none of which are guaranteed to exist) they invoke the builtin functions `repr()`, `str()` and `cmp()`. Note that one should generally avoid exposing rich comparison methods like `__eq__()`, `__ne__()`, `__le__()`. To make the proxy type support comparison by value one can just expose `__cmp__()` instead (even if the referent does not have such a method). 
Example ------- :: from processing.managers import BaseManager, CreatorMethod class FooClass(object): def bar(self): print 'BAR' def baz(self): print 'BAZ' class NewManager(BaseManager): Foo = CreatorMethod(FooClass) if __name__ == '__main__': manager = NewManager() manager.start() foo = manager.Foo() foo.bar() # prints 'BAR' foo.baz() # prints 'BAZ' manager.shutdown() See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples. Using a remote manager ====================== It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a shared queue which remote clients can use:: >>> from processing.managers import BaseManager, CreatorMethod >>> import Queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy') ... >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none') >>> m.serveForever() One client can access the server as follows:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.put('hello') Another client can also use it:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.get() 'hello' .. _Prev: connection-objects.html .. _Up: processing-ref.html .. _Next: proxy-objects.html uqfoundation-multiprocess-b3457a5/py3.11/doc/pool-objects.html000066400000000000000000000265511455552142400243170ustar00rootroot00000000000000 Process Pools
Prev         Up         Next

Process Pools

The processing.pool module has one public class:

class Pool(processes=None, initializer=None, initargs=())

A class representing a pool of worker processes.

Tasks can be offloaded to the pool and the results dealt with when they become available.

Note that tasks can only be submitted (or retrieved) by the process which created the pool object.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

Pool objects

Pool has the following public methods:

__init__(processes=None)
The constructor creates and starts processes worker processes. If processes is None then cpuCount() is used to find a default, falling back to 1 if cpuCount() raises NotImplementedError.
apply(func, args=(), kwds={})
Equivalent of the apply() builtin function. It blocks till the result is ready.
applyAsync(func, args=(), kwds={}, callback=None)

A variant of the apply() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

map(func, iterable, chunksize=None)

A parallel equivalent of the map() builtin function. It blocks till the result is ready.

This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.

mapAsync(func, iterable, chunksize=None, callback=None)

A variant of the map() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

imap(func, iterable, chunksize=1)

An equivalent of itertools.imap().

The chunksize argument is the same as the one used by the map() method. For very long iterables, using a large value for chunksize can make the job complete much faster than using the default value of 1.

Also if chunksize is 1 then the next() method of the iterator returned by the imap() method has an optional timeout parameter: next(timeout) will raise processing.TimeoutError if the result cannot be returned within timeout seconds.

imapUnordered(func, iterable, chunksize=1)
The same as imap() except that the ordering of the results from the returned iterator should be considered arbitrary. (The order is guaranteed to be "correct" only when there is a single worker process.)
close()
Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
terminate()
Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected terminate() will be called immediately.
join()
Wait for the worker processes to exit. One must call close() or terminate() before using join().

Asynchronous result objects

The result objects returned by applyAsync() and mapAsync() have the following public methods:

get(timeout=None)
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then processing.TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
wait(timeout=None)
Waits until the result is available or until timeout seconds pass.
ready()
Returns whether the call has completed.
successful()
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.

Examples

The following example demonstrates the use of a pool:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes

    result = pool.applyAsync(f, (10,))    # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow

    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

    it = pool.imap(f, range(10))
    print it.next()                       # prints "0"
    print it.next()                       # prints "1"
    print it.next(timeout=1)              # prints "4" unless your computer is *very* slow

    import time
    result = pool.applyAsync(time.sleep, (10,))
    print result.get(timeout=1)           # raises `TimeoutError`

See also ex_pool.py.
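
The callback variants can be combined with the result-object methods above. The following is a minimal sketch, not part of the package's own examples; the collect() helper and the values used are invented purely for illustration:

from processing import Pool

def f(x):
    return x*x

results = []

def collect(value):
    # runs in the thread which handles results, so it must return quickly
    results.append(value)

if __name__ == '__main__':
    pool = Pool(processes=2)
    r = pool.applyAsync(f, (7,), callback=collect)
    r.wait()                     # block until the result is available
    print(r.ready())             # True once the call has completed
    print(r.successful())        # True since f(7) raised no exception
    print(results)               # [49] -- filled in by the callback
    pool.close()
    pool.join()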

uqfoundation-multiprocess-b3457a5/py3.11/doc/pool-objects.txt000066400000000000000000000136411455552142400241660ustar00rootroot00000000000000.. include:: header.txt =============== Process Pools =============== The `processing.pool` module has one public class: **class** `Pool(processes=None, initializer=None, initargs=())` A class representing a pool of worker processes. Tasks can be offloaded to the pool and the results dealt with when they become available. Note that tasks can only be submitted (or retrieved) by the process which created the pool object. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. Pool objects ============ `Pool` has the following public methods: `__init__(processes=None)` The constructor creates and starts `processes` worker processes. If `processes` is `None` then `cpuCount()` is used to find a default or 1 if `cpuCount()` raises `NotImplemented`. `apply(func, args=(), kwds={})` Equivalent of the `apply()` builtin function. It blocks till the result is ready. `applyAsync(func, args=(), kwds={}, callback=None)` A variant of the `apply()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `map(func, iterable, chunksize=None)` A parallel equivalent of the `map()` builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting `chunksize` to a positive integer. `mapAsync(func, iterable, chunksize=None, callback=None)` A variant of the `map()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `imap(func, iterable, chunksize=1)` An equivalent of `itertools.imap()`. The `chunksize` argument is the same as the one used by the `map()` method. For very long iterables using a large value for `chunksize` can make make the job complete **much** faster than using the default value of `1`. Also if `chunksize` is `1` then the `next()` method of the iterator returned by the `imap()` method has an optional `timeout` parameter: `next(timeout)` will raise `processing.TimeoutError` if the result cannot be returned within `timeout` seconds. `imapUnordered(func, iterable, chunksize=1)` The same as `imap()` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) `close()` Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. `terminate()` Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected `terminate()` will be called immediately. 
`join()` Wait for the worker processes to exit. One must call `close()` or `terminate()` before using `join()`. Asynchronous result objects =========================== The result objects returns by `applyAsync()` and `mapAsync()` have the following public methods: `get(timeout=None)` Returns the result when it arrives. If `timeout` is not `None` and the result does not arrive within `timeout` seconds then `processing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by `get()`. `wait(timeout=None)` Waits until the result is available or until `timeout` seconds pass. `ready()` Returns whether the call has completed. `successful()` Returns whether the call completed without raising an exception. Will raise `AssertionError` if the result is not ready. Examples ======== The following example demonstrates the use of a pool:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, (10,)) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print it.next() # prints "0" print it.next() # prints "1" print it.next(timeout=1) # prints "4" unless your computer is *very* slow import time result = pool.applyAsync(time.sleep, (10,)) print result.get(timeout=1) # raises `TimeoutError` See also `ex_pool.py <../examples/ex_pool.py>`_. .. _Prev: proxy-objects.html .. _Up: processing-ref.html .. _Next: sharedctypes.html uqfoundation-multiprocess-b3457a5/py3.11/doc/process-objects.html000066400000000000000000000235741455552142400250260ustar00rootroot00000000000000 Process objects
Prev         Up         Next

Process objects

Process objects represent activity that is run in a separate process.

Process

The Process class has equivalents of all the methods of threading.Thread:

__init__(group=None, target=None, name=None, args=(), kwargs={})

This constructor should always be called with keyword arguments. Arguments are:

group
should be None; exists for compatibility with threading.Thread.
target
is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name
is the process name. By default, a unique name is constructed of the form 'Process-N1:N2:...:Nk' where N1,N2,...,Nk is a sequence of integers whose length is determined by the generation of the process.
args
is the argument tuple for the target invocation. Defaults to ().
kwargs
is a dictionary of keyword arguments for the target invocation. Defaults to {}.

If a subclass overrides the constructor, it must make sure it invokes the base class constructor (Process.__init__()) before doing anything else to the process.

run()

Method representing the process's activity.

You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.

start()

Start the process's activity.

This must be called at most once per process object. It arranges for the object's run() method to be invoked in a separate process.

join(timeout=None)

This blocks the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.

If timeout is None then there is no timeout.

A process can be joined many times.

A process cannot join itself because this would cause a deadlock.

It is an error to attempt to join a process before it has been started.

getName()
Return the process's name.
setName(name)

Set the process's name.

The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor.

isAlive()

Return whether the process is alive.

Roughly, a process object is alive from the moment the start() method returns until the child process terminates.

isDaemon()
Return the process's daemon flag.
setDaemon(daemonic)

Set the process's daemon flag to the Boolean value daemonic. This must be called before start() is called.

The initial value is inherited from the creating process.

When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes.

In addition process objects also support the following methods.

getPid()
Return the process ID. Before the process is spawned this will be None.
getExitCode()
Return the child's exit code. This will be None if the process has not yet terminated. A negative value -N indicates that the child was terminated by signal N.
getAuthKey()

Return the process's authentication key (a string).

When the processing package is initialized the main process is assigned a random hexadecimal string.

When a Process object is created it will inherit the authentication key of its parent process, although this may be changed using setAuthKey() below.

See Authentication Keys.

setAuthKey(authkey)
Set the process's authentication key which must be a string.
terminate()

Terminate the process. On Unix this is done using the SIGTERM signal and on Windows TerminateProcess() is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will not be terminated.

Warning

If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock.

Note that the start(), join(), isAlive() and getExitCode() methods should only be called by the process that created the process object.

Example

Example usage of some of the methods of Process:

>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
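
Since run() may be overridden in a subclass (see above), the following is a minimal subclassing sketch; the Greeter class and its who argument are invented here for illustration and are not part of the package:

from processing import Process

class Greeter(Process):
    def __init__(self, who):
        # a subclass must invoke the base constructor before anything else
        Process.__init__(self)
        self.who = who
    def run(self):
        print('hello ' + self.who)

if __name__ == '__main__':
    p = Greeter('world')
    p.start()                    # run() is invoked in a separate process
    p.join()                     # wait for the child to finish
    print(p.getExitCode())       # 0 on normal termination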
uqfoundation-multiprocess-b3457a5/py3.11/doc/process-objects.txt000066400000000000000000000136131455552142400246720ustar00rootroot00000000000000.. include:: header.txt ================= Process objects ================= Process objects represent activity that is run in a separate process. Process ======= The `Process` class has equivalents of all the methods of `threading.Thread`: `__init__(group=None, target=None, name=None, args=(), kwargs={})` This constructor should always be called with keyword arguments. Arguments are: `group` should be `None`; exists for compatibility with `threading.Thread`. `target` is the callable object to be invoked by the `run()` method. Defaults to None, meaning nothing is called. `name` is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. `args` is the argument tuple for the target invocation. Defaults to `()`. `kwargs` is a dictionary of keyword arguments for the target invocation. Defaults to `{}`. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (`Process.__init__()`) before doing anything else to the process. `run()` Method representing the process's activity. You may override this method in a subclass. The standard `run()` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the `args` and `kwargs` arguments, respectively. `start()` Start the process's activity. This must be called at most once per process object. It arranges for the object's `run()` method to be invoked in a separate process. `join(timeout=None)` This blocks the calling thread until the process whose `join()` method is called terminates or until the optional timeout occurs. If `timeout` is `None` then there is no timeout. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. `getName()` Return the process's name. `setName(name)` Set the process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. `isAlive()` Return whether the process is alive. Roughly, a process object is alive from the moment the `start()` method returns until the child process terminates. `isDaemon()` Return the process's daemon flag. `setDaemon(daemonic)` Set the process's daemon flag to the Boolean value `daemonic`. This must be called before `start()` is called. The initial value is inherited from the creating process. When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes. In addition process objects also support the following methods. `getPid()` Return the process ID. Before the process is spawned this will be `None`. `getExitCode()` Return the child's exit code. This will be `None` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. `getAuthKey()` Return the process's authentication key (a string). When the `processing` package is initialized the main process is assigned a random hexadecimal string. 
When a `Process` object is created it will inherit the authentication key of its parent process, although this may be changed using `setAuthKey()` below. See `Authentication Keys `_. `setAuthKey(authkey)` Set the process's authentication key which must be a string. `terminate()` Terminate the process. On Unix this is done using the `SIGTERM` signal and on Windows `TerminateProcess()` is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will *not* be terminates. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the `start()`, `join()`, `isAlive()` and `getExitCode()` methods should only be called by the process that created the process object. Example ======= Example usage of some of the methods of `Process`:: >>> import processing, time, signal >>> p = processing.Process(target=time.sleep, args=(1000,)) >>> print p, p.isAlive() False >>> p.start() >>> print p, p.isAlive() True >>> p.terminate() >>> print p, p.isAlive() False >>> p.getExitCode() == -signal.SIGTERM True .. _Prev: processing-ref.html .. _Up: processing-ref.html .. _Next: queue-objects.html uqfoundation-multiprocess-b3457a5/py3.11/doc/processing-ref.html000066400000000000000000000573611455552142400246500ustar00rootroot00000000000000 processing package reference
Prev         Up         Next

processing package reference

The processing package mostly replicates the API of the threading module.

Classes and exceptions

class Process(group=None, target=None, name=None, args=(), kwargs={})

An analogue of threading.Thread.

See Process objects.

exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Pipes and Queues

When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks.

For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers).

Note that one can also create a shared queue by using a manager object -- see Managers.

For an example of the usage of queues for interprocess communication see ex_workers.py.

Pipe(duplex=True)

Returns a pair (conn1, conn2) of connection objects representing the ends of a pipe.

If duplex is true then the pipe is two way; otherwise conn1 can only be used for receiving messages and conn2 can only be used for sending messages.

See Connection objects.

Queue(maxsize=0)

Returns a process shared queue object. The usual Empty and Full exceptions from the standard library's Queue module are raised to signal timeouts.

See Queue objects.
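
The following is a minimal sketch combining a pipe and a queue as described above; it assumes the send()/recv() methods of connection objects (see Connection objects), and the child() function is invented for illustration:

from processing import Process, Pipe, Queue

def child(conn, q):
    conn.send('via pipe')        # send a picklable object down the pipe
    q.put('via queue')           # and another through the shared queue
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()   # duplex by default
    queue = Queue()
    p = Process(target=child, args=(child_conn, queue))
    p.start()
    print(parent_conn.recv())          # 'via pipe'
    print(queue.get())                 # 'via queue'
    p.join()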

Synchronization primitives

Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the standard library's threading module.

Note that one can also create synchronization primitives by using a manager object -- see Managers.

BoundedSemaphore(value=1)

Returns a bounded semaphore object: a clone of threading.BoundedSemaphore.

(On Mac OSX this is indistinguishable from Semaphore() because sem_getvalue() is not implemented on that platform).

Condition(lock=None)

Returns a condition variable: a clone of threading.Condition.

If lock is specified then it should be a Lock or RLock object from processing.

Event()
Returns an event object: a clone of threading.Event.
Lock()
Returns a non-recursive lock object: a clone of threading.Lock.
RLock()
Returns a recursive lock object: a clone of threading.RLock.
Semaphore(value=1)
Returns a semaphore object: a clone of threading.Semaphore.

Acquiring with a timeout

The acquire() method of BoundedSemaphore, Lock, RLock and Semaphore has a timeout parameter not supported by the equivalents in threading. The signature is acquire(block=True, timeout=None) with keyword parameters being acceptable. If block is true and timeout is not None then it specifies a timeout in seconds. If block is false then timeout is ignored.
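
For instance, a small single-process sketch of the timeout form (the one second timeout is chosen arbitrarily):

from processing import Lock

lock = Lock()
lock.acquire()
# a second acquire of a non-recursive lock cannot succeed, so this call
# returns False after roughly one second instead of blocking forever
print(lock.acquire(block=True, timeout=1.0))
lock.release()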

Interrupting the main thread

If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to BoundedSemaphore.acquire(), Lock.acquire(), RLock.acquire(), Semaphore.acquire(), Condition.acquire() or Condition.wait() then the call will be immediately interrupted and KeyboardInterrupt will be raised.

This differs from the behaviour of threading where SIGINT will be ignored while the equivalent blocking calls are in progress.

Shared Objects

It is possible to create shared objects using shared memory which can be inherited by child processes.

Value(typecode_or_type, *args, **, lock=True)

Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Array(typecode_or_type, size_or_initializer, **, lock=True)

Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library.

See also sharedctypes.
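
A small sketch of a shared Value and Array being modified by a child process; the function f() and the chosen typecodes are illustrative only:

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.14159            # the synchronized wrapper exposes the data as .value
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)        # 'd' is a double, as in the array module
    arr = Array('i', range(5))   # 'i' is a signed int
    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()
    print(num.value)             # 3.14159
    print(arr[:])                # [0, -1, -2, -3, -4]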

Managers

Managers provide a way to create data which can be shared between different processes.

Manager()

Returns a started SyncManager object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies.

The methods for creating shared objects are

list(), dict(), Namespace(), Value(), Array(), Lock(), RLock(), Semaphore(), BoundedSemaphore(), Condition(), Event(), Queue().

See SyncManager.

It is possible to create managers which support other types -- see Customized managers.
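
A short sketch of sharing a dict and a list through a manager; the worker function f() is invented for illustration:

from processing import Process, Manager

def f(d, l):
    d['status'] = 'finished'
    l.append('done')

if __name__ == '__main__':
    manager = Manager()
    d = manager.dict()           # proxy for a shared dict
    l = manager.list()           # proxy for a shared list
    p = Process(target=f, args=(d, l))
    p.start()
    p.join()
    print(d)                     # {'status': 'finished'}
    print(l)                     # ['done']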

Process Pools

One can create a pool of processes which will carry out tasks submitted to it.

Pool(processes=None, initializer=None, initargs=())

Returns a process pool object which controls a pool of worker processes to which jobs can be submitted.

It supports asynchronous results with timeouts and callbacks and has a parallel map implementation.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

See Pool objects.

Logging

Some support for logging is available. Note, however, that the logging package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up.

enableLogging(level, HandlerType=None, handlerArgs=(), format=None)

Enables logging and sets the debug level used by the package's logger to level. See documentation for the logging module in the standard library.

If HandlerType is specified then a handler is created using HandlerType(*handlerArgs) and this will be used by the logger -- any previous handlers will be discarded. If format is specified then this will be used for the handler; otherwise format defaults to '[%(levelname)s/%(processName)s] %(message)s'. (The logger used by processing allows use of the non-standard '%(processName)s' format.)

If HandlerType is not specified and the logger has no handlers then a default one is created which prints to sys.stderr.

Note: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call enableLogging() with the same arguments which were used when its parent process last called enableLogging() (if it ever did).

getLogger()
Returns the logger used by processing. If enableLogging() has not yet been called then None is returned.

Below is an example session with logging turned on:

>>> import processing, logging
>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] manager exiting with exitcode 0

Miscellaneous

activeChildren()

Return list of all live children of the current process.

Calling this has the side effect of "joining" any processes which have already finished.

cpuCount()
Returns the number of CPUs in the system. May raise NotImplementedError.
currentProcess()

An analogue of threading.current_thread().

Returns the object corresponding to the current process.

freezeSupport()

Adds support for when a program which uses the processing package has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example

from processing import Process, freezeSupport

def f():
    print 'hello world!'

if __name__ == '__main__':
    freezeSupport()
    Process(target=f).start()

If the freezeSupport() line is missed out then trying to run the frozen executable will raise RuntimeError.

If the module is being run normally by the python interpreter then freezeSupport() has no effect.
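
Returning to the helpers described at the start of this section, a brief interactive sketch (the CPU count shown is machine dependent):

>>> import processing
>>> processing.cpuCount()                 # number of CPUs; varies by machine
4
>>> processing.currentProcess().getName()
'MainProcess'
>>> processing.activeChildren()           # no child processes started yet
[]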

Note

  • The processing.dummy package replicates the API of processing but is no more than a wrapper around the threading module.
  • processing contains no analogues of activeCount, enumerate, settrace, setprofile, Timer, or local from the threading module.
uqfoundation-multiprocess-b3457a5/py3.11/doc/processing-ref.txt000066400000000000000000000310141455552142400245060ustar00rootroot00000000000000.. include:: header.txt ============================== processing package reference ============================== The `processing` package mostly replicates the API of the `threading` module. Classes and exceptions ---------------------- **class** `Process(group=None, target=None, name=None, args=(), kwargs={})` An analogue of `threading.Thread`. See `Process objects`_. **exception** `BufferTooShort` Exception raised by the `recvBytesInto()` method of a `connection object `_ when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Pipes and Queues ---------------- When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers). Note that one can also create a shared queue by using a manager object -- see `Managers`_. For an example of the usage of queues for interprocess communication see `ex_workers.py <../examples/ex_workers.py>`_. `Pipe(duplex=True)` Returns a pair `(conn1, conn2)` of connection objects representing the ends of a pipe. If `duplex` is true then the pipe is two way; otherwise `conn1` can only be used for receiving messages and `conn2` can only be used for sending messages. See `Connection objects `_. `Queue(maxsize=0)` Returns a process shared queue object. The usual `Empty` and `Full` exceptions from the standard library's `Queue` module are raised to signal timeouts. See `Queue objects `_. Synchronization primitives -------------------------- Generally synchronization primitives are not as necessary in a multiprocess program as they are in a mulithreaded program. See the documentation for the standard library's `threading` module. Note that one can also create synchronization primitves by using a manager object -- see `Managers`_. `BoundedSemaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.BoundedSemaphore`. (On Mac OSX this is indistiguishable from `Semaphore()` because `sem_getvalue()` is not implemented on that platform). `Condition(lock=None)` Returns a condition variable: a clone of `threading.Condition`. If `lock` is specified then it should be a `Lock` or `RLock` object from `processing`. `Event()` Returns an event object: a clone of `threading.Event`. `Lock()` Returns a non-recursive lock object: a clone of `threading.Lock`. `RLock()` Returns a recursive lock object: a clone of `threading.RLock`. `Semaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.Semaphore`. .. admonition:: Acquiring with a timeout The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and `Semaphore` has a timeout parameter not supported by the equivalents in `threading`. The signature is `acquire(block=True, timeout=None)` with keyword parameters being acceptable. If `block` is true and `timeout` is not `None` then it specifies a timeout in seconds. If `block` is false then `timeout` is ignored. .. 
admonition:: Interrupting the main thread If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to `BoundedSemaphore.acquire()`, `Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()`, `Condition.acquire()` or `Condition.wait()` then the call will be immediately interrupted and `KeyboardInterrupt` will be raised. This differs from the behaviour of `threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared Objects -------------- It is possible to create shared objects using shared memory which can be inherited by child processes. `Value(typecode_or_type, *args, **, lock=True)` Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Array(typecode_or_type, size_or_initializer, **, lock=True)` Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library. See also `sharedctypes `_. Managers -------- Managers provide a way to create data which can be shared between different processes. `Manager()` Returns a started `SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. The methods for creating shared objects are `list()`, `dict()`, `Namespace()`, `Value()`, `Array()`, `Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`. See `SyncManager `_. It is possible to create managers which support other types -- see `Customized managers `_. Process Pools ------------- One can create a pool of processes which will carry out tasks submitted to it. 
`Pool(processes=None, initializer=None, initargs=())` Returns a process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. See `Pool objects `_. Logging ------- Some support for logging is available. Note, however, that the `logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. `enableLogging(level, HandlerType=None, handlerArgs=(), format=None)` Enables logging and sets the debug level used by the package's logger to `level`. See documentation for the `logging` module in the standard library. If `HandlerType` is specified then a handler is created using `HandlerType(*handlerArgs)` and this will be used by the logger -- any previous handlers will be discarded. If `format` is specified then this will be used for the handler; otherwise `format` defaults to `'[%(levelname)s/%(processName)s] %(message)s'`. (The logger used by `processing` allows use of the non-standard `'%(processName)s'` format.) If `HandlerType` is not specified and the logger has no handlers then a default one is created which prints to `sys.stderr`. *Note*: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call `enableLogging()` with the same arguments which were used when its parent process last called `enableLogging()` (if it ever did). `getLogger()` Returns the logger used by `processing`. If `enableLogging()` has not yet been called then `None` is returned. Below is an example session with logging turned on:: >>> import processing, logging >>> processing.enableLogging(level=logging.INFO) >>> processing.getLogger().warning('doomed') [WARNING/MainProcess] doomed >>> m = processing.Manager() [INFO/SyncManager-1] child process calling self.run() [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-1] manager received shutdown message [INFO/SyncManager-1] manager exiting with exitcode 0 Miscellaneous ------------- `activeChildren()` Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. `cpuCount()` Returns the number of CPUs in the system. May raise `NotImplementedError`. `currentProcess()` An analogue of `threading.current_thread()`. Returns the object corresponding to the current process. `freezeSupport()` Adds support for when a program which uses the `processing` package has been frozen to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) One needs to call this function straight after the `if __name__ == '__main__'` line of the main module. For example :: from processing import Process, freezeSupport def f(): print 'hello world!' if __name__ == '__main__': freezeSupport() Process(target=f).start() If the `freezeSupport()` line is missed out then trying to run the frozen executable will raise `RuntimeError`. If the module is being run normally by the python interpreter then `freezeSupport()` has no effect. .. 
note:: * The `processing.dummy` package replicates the API of `processing` but is no more than a wrapper around the `threading` module. * `processing` contains no analogues of `activeCount`, `enumerate`, `settrace`, `setprofile`, `Timer`, or `local` from the `threading` module. Subsections ----------- + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes object `_ + `Listeners and Clients `_ .. _Prev: intro.html .. _Up: index.html .. _Next: process-objects.html uqfoundation-multiprocess-b3457a5/py3.11/doc/programming-guidelines.html000066400000000000000000000214551455552142400263650ustar00rootroot00000000000000 Programming guidelines
Prev         Up         Next

Programming guidelines

There are certain guidelines and idioms which should be adhered to when using the processing package.

All platforms

Avoid shared state

As far as possible one should try to avoid shifting large amounts of data between processes.

It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the threading module.

Picklability:
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies:

Do not use a proxy object from more than one thread unless you protect it with a lock.

(There is never a problem with different processes using the 'same' proxy.)

Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or activeChildren() is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's isAlive() will join the process. Even so it is probably good practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many of the types from the processing package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes

Using the terminate() method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes.

Therefore it is probably best to only consider using terminate() on processes which never use any shared resources.

Joining processes that use queues

Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the cancelJoin() method of the queue to avoid this behaviour.)

This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will automatically be joined.

An example which will deadlock is the following:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # this deadlocks
    obj = queue.get()

A fix here would be to swap the last two lines round (or simply remove the p.join() line).
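
For clarity, the corrected ordering is the same code with the last two lines swapped:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    obj = queue.get()           # drain the queue first ...
    p.join()                    # ... then joining cannot deadlock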

Explicitly pass resources to child processes

On Unix a child process can make use of a shared resource created in a parent process by accessing it as a global. However, it is better to pass the object as an argument to the constructor for the child process.

Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process.

So for instance

from processing import Process, Lock

def f():
    ... do something using "lock" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f).start()

should be rewritten as

from processing import Process, Lock

def f(l):
    ... do something using "l" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f, args=(lock,)).start()

Windows

Since Windows lacks os.fork() it has a few extra restrictions:

More picklability:

Ensure that all arguments to Process.__init__() are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the target argument on Windows --- just define a function and use that instead.

Also, if you subclass Process then make sure that instances will be picklable when the start() method is called.

Global variables:

Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that start() was called.

However, global variables which are just module level constants cause no problems.

Safe importing of main module:

Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).

For example, under Windows running the following module would fail with a RuntimeError:

from processing import Process

def foo():
    print 'hello'

p = Process(target=foo)
p.start()

Instead one should protect the "entry point" of the program by using if __name__ == '__main__': as follows:

from processing import Process

def foo():
    print 'hello'

if __name__ == '__main__':
    freezeSupport()
    p = Process(target=foo)
    p.start()

(The freezeSupport() line can be omitted if the program will be run normally instead of frozen.)

This allows the newly spawned Python interpreter to safely import the module and then run the module's foo() function.

Similar restrictions apply if a pool or manager is created in the main module.

uqfoundation-multiprocess-b3457a5/py3.11/doc/programming-guidelines.txt000066400000000000000000000150221455552142400262310ustar00rootroot00000000000000.. include:: header.txt ======================== Programming guidelines ======================== There are certain guidelines and idioms which should be adhered to when using the `processing` package. All platforms ------------- *Avoid shared state* As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the `threading` module. *Picklability*: Ensure that the arguments to the methods of proxies are picklable. *Thread safety of proxies*: Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the 'same' proxy.) *Joining zombie processes* On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or `activeChildren()` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's `isAlive()` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. *Better to inherit than pickle/unpickle* On Windows many of types from the `processing` package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which need access to a shared resource created elsewhere can inherit it from an ancestor process. *Avoid terminating processes* Using the `terminate()` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using `terminate()` on processes which never use any shared resources. *Joining processes that use queues* Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the `cancelJoin()` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from processing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the `p.join()` line). *Explicity pass resources to child processes* On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. 
Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from processing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from processing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Windows ------- Since Windows lacks `os.fork()` it has a few extra restrictions: *More picklability*: Ensure that all arguments to `Process.__init__()` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the `target` argument on Windows --- just define a function and use that instead. Also, if you subclass `Process` then make sure that instances will be picklable when the `start()` method is called. *Global variables*: Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that `start()` was called. However, global variables which are just module level constants cause no problems. *Safe importing of main module*: Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a `RuntimeError`:: from processing import Process def foo(): print 'hello' p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using `if __name__ == '__main__':` as follows:: from processing import Process def foo(): print 'hello' if __name__ == '__main__': freezeSupport() p = Process(target=foo) p.start() (The `freezeSupport()` line can be ommitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's `foo()` function. Similar restrictions apply if a pool or manager is created in the main module. .. _Prev: connection-ref.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.11/doc/proxy-objects.html000066400000000000000000000175771455552142400245370ustar00rootroot00000000000000 Proxy objects
Prev         Up         Next

Proxy objects

A proxy is an object which refers to a shared object which lives (presumably) in a different process. The shared object is said to be the referent of the proxy. Multiple proxy objects may have the same referent.

A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can:

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<Proxy[list] object at 0x00DFA230>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
>>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
True

Notice that applying str() to a proxy will return the representation of the referent, whereas applying repr() will return the representation of the proxy.

An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:

>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b)         # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']

Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the for statement:

>>> a = manager.dict([(i*i, i) for i in range(10)])
>>> for key in a:
...     print '<%r,%r>' % (key, a[key]),
...
<0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6>

Note

Although list and dict proxy objects are iterable, it will be much more efficient to iterate over a copy of the referent, for example

for item in some_list[:]:
    ...

and

for key in some_dict.keys():
    ...

Methods of BaseProxy

Proxy objects are instances of subclasses of BaseProxy. The only semi-public methods of BaseProxy are the following:

_callMethod(methodname, args=(), kwds={})

Call and return the result of a method of the proxy's referent.

If proxy is a proxy whose referent is obj then the expression

proxy._callMethod(methodname, args, kwds)

will evaluate the expression

getattr(obj, methodname)(*args, **kwds)         (*)

in the manager's process.

The returned value will be either a copy of the result of (*) or if the result is an unpicklable iterator then a proxy for the iterator.

If an exception is raised by (*) then it is re-raised by _callMethod(). If some other exception is raised in the manager's process then this is converted into a RemoteError exception and is raised by _callMethod().

Note in particular that an exception will be raised if methodname has not been exposed --- see the exposed argument to CreatorMethod.

_getValue()

Return a copy of the referent.

If the referent is unpicklable then this will raise an exception.

__repr__
Return a representation of the proxy object.
__str__
Return the representation of the referent.

Cleanup

A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent.

A shared object gets deleted from the manager process when there are no longer any proxies referring to it.

Examples

An example of the usage of _callMethod():

>>> l = manager.list(range(10))
>>> l._callMethod('__getslice__', (2, 7))   # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._callMethod('__iter__')               # equiv to `iter(l)`
<Proxy[iter] object at 0x00DFAFF0>
>>> l._callMethod('__getitem__', (20,))     # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range

As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:

class IteratorProxy(BaseProxy):
    def __iter__(self):
        return self
    def next(self):
        return self._callMethod('next')
uqfoundation-multiprocess-b3457a5/py3.11/doc/proxy-objects.txt000066400000000000000000000115571455552142400244020ustar00rootroot00000000000000.. include:: header.txt =============== Proxy objects =============== A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that the its referent can:: >>> from processing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print l [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print repr(l) >>> l[4] 16 >>> l[2:5] [4, 9, 16] >>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] True Notice that applying `str()` to a proxy will return the representation of the referent, whereas applying `repr()` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of `a` now contains referent of `b` >>> print a, b [[]] [] >>> b.append('hello') >>> print a, b [['hello']] ['hello'] Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the `for` statement:: >>> a = manager.dict([(i*i, i) for i in range(10)]) >>> for key in a: ... print '<%r,%r>' % (key, a[key]), ... <0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6> .. note:: Although `list` and `dict` proxy objects are iterable, it will be much more efficient to iterate over a *copy* of the referent, for example :: for item in some_list[:]: ... and :: for key in some_dict.keys(): ... Methods of `BaseProxy` ====================== Proxy objects are instances of subclasses of `BaseProxy`. The only semi-public methods of `BaseProxy` are the following: `_callMethod(methodname, args=(), kwds={})` Call and return the result of a method of the proxy's referent. If `proxy` is a proxy whose referent is `obj` then the expression `proxy._callMethod(methodname, args, kwds)` will evaluate the expression `getattr(obj, methodname)(*args, **kwds)` |spaces| _`(*)` in the manager's process. The returned value will be either a copy of the result of `(*)`_ or if the result is an unpicklable iterator then a proxy for the iterator. If an exception is raised by `(*)`_ then then is re-raised by `_callMethod()`. If some other exception is raised in the manager's process then this is converted into a `RemoteError` exception and is raised by `_callMethod()`. Note in particular that an exception will be raised if `methodname` has not been *exposed* --- see the `exposed` argument to `CreatorMethod `_. `_getValue()` Return a copy of the referent. If the referent is unpicklable then this will raise an exception. `__repr__` Return a representation of the proxy object. `__str__` Return the representation of the referent. Cleanup ======= A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. 
A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Examples ======== An example of the usage of `_callMethod()`:: >>> l = manager.list(range(10)) >>> l._callMethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callMethod('__iter__') # equiv to `iter(l)` >>> l._callMethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:: class IteratorProxy(BaseProxy): def __iter__(self): return self def next(self): return self._callMethod('next') .. _Prev: manager-objects.html .. _Up: processing-ref.html .. _Next: pool-objects.html uqfoundation-multiprocess-b3457a5/py3.11/doc/queue-objects.html000066400000000000000000000227101455552142400244630ustar00rootroot00000000000000 Queue objects
Prev         Up         Next

Queue objects

The queue type provided by processing is a multi-producer, multi-consumer FIFO queue modelled on the Queue.Queue class in the standard library.

Queue(maxsize=0)

Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe.

processing.Queue implements all the methods of Queue.Queue except for qsize(), task_done() and join().

empty()
Return True if the queue is empty, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
full()
Return True if the queue is full, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
put(item, block=True, timeout=None)
Put item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).
put_nowait(item), putNoWait(item)
Equivalent to put(item, False).
get(block=True, timeout=None)
Remove and return an item from the queue. If optional args block is true and timeout is None (the default), block if necessary until an item is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the Empty exception (timeout is ignored in that case).
get_nowait(), getNoWait()
Equivalent to get(False).

processing.Queue has a few additional methods not found in Queue.Queue which are usually unnecessary:

putMany(iterable)
If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So q.putMany(X) is a faster alternative to for x in X: q.put(x). Raises an error if the queue has finite size.
close()
Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected.
joinThread()

This joins the background thread and can only be used after close() has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe.

By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call cancelJoin() to prevent this behaviour.

cancelJoin()
Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue.

Empty and Full

processing uses the usual Queue.Empty and Queue.Full exceptions to signal a timeout. They are not available in the processing namespace so you need to import them from Queue.
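
As a minimal sketch of that timeout behaviour -- written with the modern multiprocess and queue module names rather than the legacy processing/Queue spellings used above:

from multiprocess import Queue
from queue import Empty, Full    # the exceptions live in the stdlib queue module

q = Queue(maxsize=1)
q.put('ready')
try:
    q.put('overflow', timeout=0.1)   # the queue is full, so this times out
except Full:
    print('put() timed out')
print(q.get(timeout=1))              # 'ready'
try:
    q.get(timeout=0.1)               # nothing left, so this times out
except Empty:
    print('get() timed out')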

Warning

If a process is killed using the terminate() method or os.kill() while it is trying to use a Queue then the data in the queue is likely to become corrupted. This may cause any other process to get an exception when it tries to use the queue later on.

Warning

As mentioned above, if a child process has put items on a queue (and it has not used cancelJoin()) then that process will not terminate until all buffered items have been flushed to the pipe.

This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children.

Note that a queue created using a manager does not have this issue. See Programming Guidelines.
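
A minimal sketch of the safe pattern implied by the warnings above (using the modern multiprocess names): drain the queue before joining the child, so the child's feeder thread can flush its buffer and exit:

from multiprocess import Process, Queue

def produce(q):
    for i in range(1000):
        q.put(i)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=produce, args=(q,))
    p.start()
    items = [q.get() for _ in range(1000)]   # consume everything first ...
    p.join()                                 # ... then joining cannot deadlock
    print(len(items))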

uqfoundation-multiprocess-b3457a5/py3.11/doc/queue-objects.txt000066400000000000000000000121211455552142400243310ustar00rootroot00000000000000.. include:: header.txt =============== Queue objects =============== The queue type provided by `processing` is a multi-producer, multi-consumer FIFO queue modelled on the `Queue.Queue` class in the standard library. `Queue(maxsize=0)` Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. `Queue.Queue` implements all the methods of `Queue.Queue` except for `qsize()`, `task_done()` and `join()`. `empty()` Return `True` if the queue is empty, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `full()` Return `True` if the queue is full, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `put(item, block=True, timeout=None)` Put item into the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Full` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the `Full` exception (`timeout` is ignored in that case). `put_nowait(item)`, `putNoWait(item)` Equivalent to `put(item, False)`. `get(block=True, timeout=None)` Remove and return an item from the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Empty` exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the `Empty` exception (`timeout` is ignored in that case). `get_nowait()`, `getNoWait()` Equivalent to `get(False)`. `processing.Queue` has a few additional methods not found in `Queue.Queue` which are usually unnecessary: `putMany(iterable)` If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So `q.putMany(X)` is a faster alternative to `for x in X: q.put(x)`. Raises an error if the queue has finite size. `close()` Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. `joinThread()` This joins the background thread and can only be used after `close()` has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call `cancelJoin()` to prevent this behaviour. `cancelJoin()` Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue. .. admonition:: `Empty` and `Full` `processing` uses the usual `Queue.Empty` and `Queue.Full` exceptions to signal a timeout. They are not available in the `processing` namespace so you need to import them from `Queue`. .. 
warning:: If a process is killed using the `terminate()` method or `os.kill()` while it is trying to use a `Queue` then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used `cancelJoin()`) then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all it non-daemonic children. Note that a queue created using a manager does not have this issue. See `Programming Guidelines `_. .. _Prev: process-objects.html .. _Up: processing-ref.html .. _Next: connection-objects.html uqfoundation-multiprocess-b3457a5/py3.11/doc/sharedctypes.html000066400000000000000000000241571455552142400244150ustar00rootroot00000000000000 Shared ctypes objects
Prev         Up         Next

Shared ctypes objects

The processing.sharedctypes module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the ctypes package.)

The functions in the module are

RawArray(typecode_or_type, size_or_initializer)

Returns a ctypes array allocated from shared memory.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock.
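
A small illustrative sketch of that distinction (modern multiprocess names): RawArray gives the bare ctypes array, while Array wraps it with a lock that can be held across a compound update:

from multiprocess.sharedctypes import RawArray, Array

raw = RawArray('i', 5)           # zero-initialised, no lock
raw[0] = 42                      # plain element access, not synchronized
shared = Array('i', range(5))    # wrapped with a lock (lock=True is the default)
with shared.get_lock():
    shared[0] += 1               # safe even if other processes touch the array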

RawValue(typecode_or_type, *args)

Returns a ctypes object allocated from shared memory.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see documentation for ctypes.
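
A short sketch of the c_char behaviour just described; note that in current ctypes (and hence in the modern multiprocess.sharedctypes) the two attributes are spelled value and raw:

from ctypes import c_char
from multiprocess.sharedctypes import RawArray

buf = RawArray(c_char, 20)       # a shared array of 20 c_char slots
buf.value = b'hello'             # store a NUL-terminated byte string
print(buf.value)                 # b'hello'
print(buf.raw[:8])               # b'hello\x00\x00\x00' -- raw bytes, padding included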

Array(typecode_or_type, size_or_initializer, **, lock=True)

The same as RawArray() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Value(typecode_or_type, *args, **, lock=True)

The same as RawValue() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes object.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.
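
A minimal sketch of the lock behaviour described above, written with the modern multiprocess names (where the wrapper's lock is obtained via get_lock()); holding the lock is what makes a read-modify-write such as += safe:

from multiprocess import Process, Value

def bump(counter, n):
    for _ in range(n):
        with counter.get_lock():    # += on its own is not atomic
            counter.value += 1

if __name__ == '__main__':
    counter = Value('i', 0)         # lock=True is the default
    workers = [Process(target=bump, args=(counter, 1000)) for _ in range(4)]
    for w in workers: w.start()
    for w in workers: w.join()
    print(counter.value)            # 4000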

copy(obj)
Returns a ctypes object allocated from shared memory which is a copy of the ctypes object obj.
synchronized(obj, lock=None)

Returns a process-safe wrapper object for a ctypes object which uses lock to synchronize access. If lock is None then a processing.RLock object is created automatically.

A synchronized wrapper will have two methods in addition to those of the object it wraps: getobj() returns the wrapped object and getlock() returns the lock object used for synchronization.

Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object.
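
A short sketch of synchronized() as described above, using the modern multiprocess.sharedctypes module, where getobj() and getlock() are spelled get_obj() and get_lock():

from ctypes import c_double
from multiprocess.sharedctypes import RawValue, synchronized

raw = RawValue(c_double, 0.0)
wrapped = synchronized(raw)        # an RLock is created automatically
with wrapped.get_lock():           # the lock object used for synchronization
    wrapped.get_obj().value += 1.0
print(wrapped.get_obj().value)     # 1.0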

Equivalences

The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table MyStruct is some subclass of ctypes.Structure.)

ctypes sharedctypes using type sharedctypes using typecode
c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4)
MyStruct(4, 6) RawValue(MyStruct, 4, 6)  
(c_short * 7)() RawArray(c_short, 7) RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))

Example

Below is an example where a number of ctypes objects are modified by a child process

from processing import Process, Lock
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', 'hello world', lock=lock)
    A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print n.value
    print x.value
    print s.value
    print [(p.x, p.y) for p in A]

The results printed are

49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]

Avoid sharing pointers

Although it is possible to store a pointer in shared memory, remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash.

uqfoundation-multiprocess-b3457a5/py3.11/doc/sharedctypes.txt000066400000000000000000000143071455552142400242640ustar00rootroot00000000000000.. include:: header.txt ======================== Shared ctypes objects ======================== The `processing.sharedctypes` module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the `ctypes` package.) The functions in the module are `RawArray(typecode_or_type, size_or_initializer)` Returns a ctypes array allocated from shared memory. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock. `RawValue(typecode_or_type, *args)` Returns a ctypes object allocated from shared memory. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see documentation for `ctypes`. `Array(typecode_or_type, size_or_initializer, **, lock=True)` The same as `RawArray()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Value(typecode_or_type, *args, **, lock=True)` The same as `RawValue()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `copy(obj)` Returns a ctypes object allocated from shared memory which is a copy of the ctypes object `obj`. `synchronized(obj, lock=None)` Returns a process-safe wrapper object for a ctypes object which uses `lock` to synchronize access. If `lock` is `None` then a `processing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: `getobj()` returns the wrapped object and `getlock()` returns the lock object used for synchronization. 
Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. Equivalences ============ The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table `MyStruct` is some subclass of `ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Example ======= Below is an example where a number of ctypes objects are modified by a child process :: from processing import Process, Lock from processing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for p in A: p.x **= 2 p.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(ctypes.c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print n.value print x.value print s.value print [(p.x, p.y) for p in A] The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. admonition:: Avoid sharing pointers Although it is posible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. _Prev: pool-objects.html .. _Up: processing-ref.html .. _Next: connection-ref.html uqfoundation-multiprocess-b3457a5/py3.11/doc/tests.html000066400000000000000000000060761455552142400230610ustar00rootroot00000000000000 Tests and Examples
Prev         Up         Next

Tests and Examples

processing contains a test sub-package with unit tests for the package. You can do a test run by doing

python -m processing.tests

on Python 2.5 or

python -c "from processing.tests import main; main()"

on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager.
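
For the multiprocess fork contained in this source tree, the analogous invocation (assuming a recent release that installs the tests sub-package) is

python -m multiprocess.tests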

The example sub-package contains the following modules:

ex_newtype.py
Demonstration of how to create and use customized managers and proxies.
ex_pool.py
Test of the Pool class which represents a process pool.
ex_synchronize.py
Test of synchronization types like locks, conditions and queues.
ex_workers.py
A test showing how to use queues to feed tasks to a collection of worker processes and collect the results.
ex_webserver.py
An example of how a pool of worker processes can each run a SimpleHTTPServer.HttpServer instance while sharing a single listening socket.
benchmarks.py
Some simple benchmarks comparing processing with threading.
uqfoundation-multiprocess-b3457a5/py3.11/doc/tests.txt000066400000000000000000000027331455552142400227300ustar00rootroot00000000000000.. include:: header.txt Tests and Examples ================== `processing` contains a `test` sub-package which contains unit tests for the package. You can do a test run by doing :: python -m processing.tests on Python 2.5 or :: python -c "from processing.tests import main; main()" on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager. The `example` sub-package contains the following modules: `ex_newtype.py <../examples/ex_newtype.py>`_ Demonstration of how to create and use customized managers and proxies. `ex_pool.py <../examples/ex_pool.py>`_ Test of the `Pool` class which represents a process pool. `ex_synchronize.py <../examples/ex_synchronize.py>`_ Test of synchronization types like locks, conditions and queues. `ex_workers.py <../examples/ex_workers.py>`_ A test showing how to use queues to feed tasks to a collection of worker process and collect the results. `ex_webserver.py <../examples/ex_webserver.py>`_ An example of how a pool of worker processes can each run a `SimpleHTTPServer.HttpServer` instance while sharing a single listening socket. `benchmarks.py <../examples/benchmarks.py>`_ Some simple benchmarks comparing `processing` with `threading`. .. _Prev: programming-guidelines.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.11/doc/version.txt000066400000000000000000000000341455552142400232430ustar00rootroot00000000000000.. |version| replace:: 0.52 uqfoundation-multiprocess-b3457a5/py3.11/examples/000077500000000000000000000000001455552142400220715ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/examples/__init__.py000066400000000000000000000000001455552142400241700ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/examples/benchmarks.py000066400000000000000000000131321455552142400245600ustar00rootroot00000000000000# # Simple benchmarks for the processing package # import time, sys, multiprocess as processing, threading, queue as Queue, gc processing.freezeSupport = processing.freeze_support if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in range(iterations): q.put(a) # q.putMany((a for i in range(iterations)) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print(iterations, 'objects passed through the queue in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 cond.acquire() cond.notify() cond.release() for i in range(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = processing.Pipe() cond = processing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = processing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print(iterations, 'objects passed through connection in',elapsed,'seconds') print('average number/sec:', 
iterations/elapsed) #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): a = seq[5] elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): l.acquire() l.release() elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in range(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in range(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print(iterations * 2, 'waits in', elapsed, 'seconds') print('average number/sec:', iterations * 2 / elapsed) #### def test(): manager = processing.Manager() gc.disable() print('\n\t######## testing Queue.Queue\n') test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print('\n\t######## testing processing.Queue\n') test_queuespeed(processing.Process, processing.Queue(), processing.Condition()) print('\n\t######## testing Queue managed by server process\n') test_queuespeed(processing.Process, manager.Queue(), manager.Condition()) print('\n\t######## testing processing.Pipe\n') test_pipespeed() print print('\n\t######## testing list\n') test_seqspeed(range(10)) print('\n\t######## testing list managed by server process\n') test_seqspeed(manager.list(range(10))) print('\n\t######## testing Array("i", ..., lock=False)\n') test_seqspeed(processing.Array('i', range(10), lock=False)) print('\n\t######## testing Array("i", ..., lock=True)\n') test_seqspeed(processing.Array('i', range(10), lock=True)) print() print('\n\t######## testing threading.Lock\n') test_lockspeed(threading.Lock()) print('\n\t######## testing threading.RLock\n') test_lockspeed(threading.RLock()) print('\n\t######## testing processing.Lock\n') test_lockspeed(processing.Lock()) print('\n\t######## testing processing.RLock\n') test_lockspeed(processing.RLock()) print('\n\t######## testing lock managed by server process\n') test_lockspeed(manager.Lock()) print('\n\t######## testing rlock managed by server process\n') test_lockspeed(manager.RLock()) print() print('\n\t######## testing threading.Condition\n') test_conditionspeed(threading.Thread, threading.Condition()) print('\n\t######## testing processing.Condition\n') test_conditionspeed(processing.Process, processing.Condition()) print('\n\t######## testing condition managed by a server process\n') test_conditionspeed(processing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': processing.freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.11/examples/ex_newtype.py000066400000000000000000000030731455552142400246350ustar00rootroot00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. 
# from multiprocess import freeze_support as freezeSupport from multiprocess.managers import BaseManager, IteratorProxy as BaseProxy ## class Foo(object): def f(self): print('you called Foo.f()') def g(self): print('you called Foo.g()') def _h(self): print('you called Foo._h()') # A simple generator function def baz(): for i in range(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): def __iter__(self): return self def __next__(self): return self._callmethod('__next__') ## class MyManager(BaseManager): pass # register the Foo class; make all public methods accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make only `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) ## def test(): manager = MyManager() manager.start() print('-' * 20) f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') print('-' * 20) f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') print('-' * 20) it = manager.baz() for i in it: print('<%d>' % i, end=' ') print() ## if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.11/examples/ex_pool.py000066400000000000000000000155061455552142400241170ustar00rootroot00000000000000# # A test of `processing.Pool` class # from multiprocess import Pool, TimeoutError from multiprocess import cpu_count as cpuCount, current_process as currentProcess, freeze_support as freezeSupport, active_children as activeChildren import time, random, sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print('cpuCount() = %d\n' % cpuCount()) # # Create pool # PROCESSES = 4 print('Creating pool with %d processes\n' % PROCESSES) pool = Pool(PROCESSES) # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print('Ordered results using pool.apply_async():') for r in results: print('\t', r.get()) print() print('Ordered results using pool.imap():') for x in imap_it: print('\t', x) print() print('Unordered results using pool.imap_unordered():') for x in imap_unordered_it: print('\t', x) print() print('Ordered results using pool.map() --- will block till complete:') for x in pool.map(calculatestar, TASKS): print('\t', x) print() # # Simple benchmarks # N = 100000 print('def pow3(x): return x**3') t = time.time() A = list(map(pow3, range(N))) print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() B = pool.map(pow3, range(N)) print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() C = list(pool.imap(pow3, range(N), chunksize=N//8)) print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() L = [None] * 1000000 print('def noop(x): pass') print('L = [None] * 1000000') t 
= time.time() A = list(map(noop, L)) print('\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() B = pool.map(noop, L) print('\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() del A, B, C, L # # Test error handling # print('Testing error handling:') try: print(pool.apply(f, (5,))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.apply()') else: raise AssertionError('expected ZeroDivisionError') try: print(pool.map(f, range(10))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.map()') else: raise AssertionError('expected ZeroDivisionError') try: print(list(pool.imap(f, range(10)))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from list(pool.imap())') else: raise AssertionError('expected ZeroDivisionError') it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError('expected ZeroDivisionError') assert i == 9 print('\tGot ZeroDivisionError as expected from IMapIterator.next()') print() # # Testing timeouts # print('Testing ApplyResult.get() with timeout:', end='') res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except TimeoutError: sys.stdout.write('.') print() print() print('Testing IMapIterator.next() with timeout:', end='') it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except TimeoutError: sys.stdout.write('.') print() print() # # Testing callback # print('Testing callback:') A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print('\tcallbacks succeeded\n') else: print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print('Testing close():') for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print('\tclose() succeeded\n') # # Check terminate() method # print('Testing terminate():') pool = Pool(2) ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print('\tterminate() succeeded\n') # # Check garbage collection # print('Testing garbage collection:') pool = Pool(2) processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] del results, pool time.sleep(0.2) for worker in processes: assert not worker.is_alive() print('\tgarbage collection succeeded\n') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.11/examples/ex_synchronize.py000066400000000000000000000144041455552142400255150ustar00rootroot00000000000000# # A test file for the `processing` package # import time, sys, random from queue import Empty import multiprocess as processing # may 
get overwritten processing.currentProcess = processing.current_process processing.freezeSupport = processing.freeze_support processing.activeChildren = processing.active_children #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print('\n\t\t\t' + str(processing.currentProcess()) + ' has finished') running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = processing.Value('i', TASKS) mutex = processing.Lock() for i in range(TASKS): processing.Process(target=value_func, args=(running, mutex)).start() while running.value > 0: time.sleep(0.08) mutex.acquire() print(running.value, end=' ') sys.stdout.flush() mutex.release() print() print('No more running processes') #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = processing.Queue() p = processing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print(o, end=' ') sys.stdout.flush() except Empty: print('TIMEOUT') print() #### TEST_CONDITION def condition_func(cond): cond.acquire() print('\t' + str(cond)) time.sleep(2) print('\tchild is notifying') print('\t' + str(cond)) cond.notify() cond.release() def test_condition(): cond = processing.Condition() p = processing.Process(target=condition_func, args=(cond,)) print(cond) cond.acquire() print(cond) cond.acquire() print(cond) p.start() print('main is waiting') cond.wait() print('main has woken up') print(cond) cond.release() print(cond) cond.release() p.join() print(cond) #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print(running.value, 'tasks are running') mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print('%s has finished' % processing.currentProcess()) mutex.release() sema.release() def test_semaphore(): sema = processing.Semaphore(3) mutex = processing.RLock() running = processing.Value('i', 0) processes = [ processing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print('\tchild sleeping') time.sleep(5.5) print('\n\tchild terminating') def test_join_timeout(): p = processing.Process(target=join_timeout_func) p.start() print('waiting for process to finish') while 1: p.join(timeout=1) if not p.is_alive(): break print('.', end=' ') sys.stdout.flush() #### TEST_EVENT def event_func(event): print('\t%r is waiting' % processing.currentProcess()) event.wait() print('\t%r has woken up' % processing.currentProcess()) def test_event(): event = processing.Event() processes = [processing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print('main is sleeping') time.sleep(2) print('main is setting event') event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert list(a) == sa print('Tests passed') def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [processing.Value(id, v) for id, v in values] shared_arrays = 
[processing.Array(id, a) for id, a in arrays] p = processing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print('\n\t######## %s\n' % func.__name__) func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print(info) raise ValueError('there should be no positive refcounts left') if __name__ == '__main__': processing.freezeSupport() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print(' Using processes '.center(79, '-')) namespace = processing elif sys.argv[1] == 'manager': print(' Using processes and a manager '.center(79, '-')) namespace = processing.Manager() namespace.Process = processing.Process namespace.currentProcess = processing.currentProcess namespace.activeChildren = processing.activeChildren elif sys.argv[1] == 'threads': print(' Using threads '.center(79, '-')) import processing.dummy as namespace else: print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]) raise SystemExit(2) test(namespace) uqfoundation-multiprocess-b3457a5/py3.11/examples/ex_webserver.py000066400000000000000000000041001455552142400251360ustar00rootroot00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `processing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. 
# import os import sys from multiprocess import Process, current_process as currentProcess, freeze_support as freezeSupport from http.server import HTTPServer from http.server import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocess.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (currentProcess()._name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print('Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)) print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']) os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.11/examples/ex_workers.py000066400000000000000000000042241455552142400246350ustar00rootroot00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). 
# import time import random from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport from multiprocess import Queue # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks list(map(task_queue.put, TASKS1)) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print('Unordered results:') for i in range(len(TASKS1)): print('\t', done_queue.get()) # Add more tasks using `put()` instead of `putMany()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print('\t', done_queue.get()) # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.11/index.html000066400000000000000000000117511455552142400222550ustar00rootroot00000000000000 Python processing

Python processing

Author: R Oudkerk
Contact: roudkerk at users.berlios.de
Url: http://developer.berlios.de/projects/pyprocessing
Version: 0.52
Licence: BSD Licence

processing is a package for the Python language which supports the spawning of processes using the API of the standard library's threading module. It runs on both Unix and Windows.

Features:

  • Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
  • Objects can be shared between processes using a server process or (for simple data) shared memory.
  • Equivalents of all the synchronization primitives in threading are available.
  • A Pool class makes it easy to submit tasks to a pool of worker processes.

Examples

The processing.Process class follows the API of threading.Thread. For example

from processing import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=[q])
    p.start()
    print q.get()
    p.join()

Synchronization primitives like locks, semaphores and conditions are available, for example

>>> from processing import Condition
>>> c = Condition()
>>> print c
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print c
<Condition(<RLock(MainProcess, 1)>), 0>

One can also use a manager to create shared objects either in shared memory or in a server process, for example

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print l
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print repr(l)
<Proxy[list] object at 0x00E1B3B0>

Tasks can be offloaded to a pool of worker processes in various ways, for example

>>> from processing import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.mapAsync(f, range(10))
>>> print result.get(timeout=1)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
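
In the modern multiprocess API the camelCase name above is spelled map_async; an equivalent sketch is

>>> from multiprocess import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.map_async(f, range(10))
>>> print(result.get(timeout=1))
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
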
BerliOS Developer Logo
uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/000077500000000000000000000000001455552142400230045ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/__init__.py000066400000000000000000000035001455552142400251130ustar00rootroot00000000000000# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Original: Copyright (c) 2006-2008, R Oudkerk # Original: Licensed to PSF under a Contributor Agreement. # Forked by Mike McKerns, to support enhanced serialization. # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) sys.path.append(root) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(root, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) del os, sys, root, get_license_text, get_readme_as_rst import sys from . import context # # Copy stuff from default context # __all__ = [x for x in dir(context._default_context) if not x.startswith('_')] globals().update((name, getattr(context._default_context, name)) for name in __all__) # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def license(): """print license""" print (__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/connection.py000066400000000000000000000774341455552142400255340ustar00rootroot00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] import errno import io import os import sys import socket import struct import time import tempfile import itertools try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import util from . import AuthenticationError, BufferTooShort from .context import reduction _ForkingPickler = reduction.ForkingPickler try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE except ImportError: if sys.platform == 'win32': raise _winapi = None # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... CONNECTION_TIMEOUT = 20. 
_mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return getattr(time,'monotonic',time.time)() + timeout def _check_timeout(t): return getattr(time,'monotonic',time.time)() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter)), dir="") else: raise ValueError('unrecognized family') def _validate_family(family): ''' Checks if the family is valid for the current environment. ''' if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) if sys.platform == 'win32' and family == 'AF_UNIX': # double check if not hasattr(socket, family): raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str or util.is_abstract_socket_namespace(address): return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _ConnectionBase: _handle = None def __init__(self, handle, readable=True, writable=True): handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? 
def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) if m.itemsize > 1: m = m.cast('B') n = m.nbytes if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(_ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. """ self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[offset // itemsize : (offset + size) // itemsize]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return _ForkingPickler.loads(buf.getbuffer()) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. 
""" _got_empty_message = False _send_ov = None def _close(self, _CloseHandle=_winapi.CloseHandle): ov = self._send_ov if ov is not None: # Interrupt WaitForMultipleObjects() in _send_bytes() ov.cancel() _CloseHandle(self._handle) def _send_bytes(self, buf): if self._send_ov is not None: # A connection should only be used by a single thread raise ValueError("concurrent send_bytes() calls " "are not supported") ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) self._send_ov = ov try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: self._send_ov = None nwritten, err = ov.GetOverlappedResult(True) if err == _winapi.ERROR_OPERATION_ABORTED: # close() was called by another thread while # WaitForMultipleObjects() was waiting for the overlapped # operation. raise OSError(errno.EPIPE, "handle is closed") assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile(self._handle, bsize, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). 
""" if _winapi: def _close(self, _close=_multiprocessing.closesocket): _close(self._handle) _write = _multiprocessing.send _read = _multiprocessing.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: n = write(self._handle, buf) remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: chunk = read(handle, remaining) n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf): n = len(buf) if n > 0x7fffffff: pre_header = struct.pack("!i", -1) header = struct.pack("!Q", n) self._send(pre_header) self._send(header) self._send(buf) else: # For wire compatibility with 3.7 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if size == -1: buf = self._recv(8) size, = struct.unpack("!Q", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. 
''' listener = self._listener if listener is not None: self._listener = None listener.close() @property def address(self): return self._listener._address @property def last_accepted(self): return self._listener._last_accepted def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = Connection(s1.detach()) c2 = Connection(s2.detach()) else: fd1, fd2 = os.pipe() c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). 
if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): # Linux abstract socket namespaces do not need to be explicitly unlinked self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() s.setblocking(True) return Connection(s.detach()) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) with socket.socket( getattr(socket, family) ) as s: s.setblocking(True) s.connect(address) return Connection(s.detach()) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: res = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = b'#CHALLENGE#' WELCOME = b'#WELCOME#' FAILURE = b'#FAILURE#' def deliver_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message, 'md5').digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message, 'md5').digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signalled. (Only # returning the first signalled might create starvation issues.) 
L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res+1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: ov, err = None, e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed... if ov and sys.getwindowsversion()[:2] >= (6, 2): # ... except on Windows 8 and later, where # the message HAS been consumed. try: _, err = ov.GetOverlappedResult(False) except OSError as e: err = e.winerror if not err and hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [o for o in object_list if o in ready_objects] else: import selectors # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). if hasattr(selectors, 'PollSelector'): _WaitSelector = selectors.PollSelector else: _WaitSelector = selectors.SelectSelector def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' with _WaitSelector() as selector: for obj in object_list: selector.register(obj, selectors.EVENT_READ) if timeout is not None: deadline = getattr(time,'monotonic',time.time)() + timeout while True: ready = selector.select(timeout) if ready: return [key.fileobj for (key, events) in ready] else: if timeout is not None: timeout = deadline - getattr(time,'monotonic',time.time)() if timeout < 0: return ready # # Make connection and socket objects shareable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . 
import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(sock.detach(), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): handle = dh.detach() return PipeConnection(handle, readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): fd = df.detach() return Connection(fd, readable, writable) reduction.register(Connection, reduce_connection) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/context.py000066400000000000000000000265321455552142400250520ustar00rootroot00000000000000import os import sys import threading from . import process from . import reduction __all__ = () # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError current_process = staticmethod(process.current_process) parent_process = staticmethod(process.parent_process) active_children = staticmethod(process.active_children) def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, lock=True): '''Returns a synchronized shared object''' from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *, lock=True): '''Returns a synchronized shared array''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . 
import connection def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. ''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) from None ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property def reducer(self): '''Controls how objects will be reduced to a form that can be shared with other processes.''' return globals().get('reduction') @reducer.setter def reducer(self, reduction): globals()['reduction'] = reduction def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) @staticmethod def _after_fork(): return _default_context.get_context().Process._after_fork() class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super().get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): if sys.platform == 'win32': return ['spawn'] else: methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] if reduction.HAVE_SEND_HANDLE: methods.append('forkserver') return methods # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): if not reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not 
available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } if sys.platform == 'darwin': # bpo-33725: running arbitrary code after fork() is no longer reliable # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn else: _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/dummy/000077500000000000000000000000001455552142400241375ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/dummy/__init__.py000066400000000000000000000057651455552142400262650ustar00rootroot00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
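#
# --- Editorial usage sketch (added by the editor; not part of the upstream
# module).  A minimal, hedged illustration of the thread-backed API this
# module provides.  The helper name `_editor_example_dummy_pool`, the sample
# values, and the assumption that the package is importable as
# `multiprocess.dummy` are illustrative only; nothing below runs at import time.
#
def _editor_example_dummy_pool():
    # `multiprocess.dummy` mirrors the multiprocess API but uses threads,
    # so it suits I/O-bound work where shared memory is convenient.
    from multiprocess.dummy import Pool, Process

    def square(x):
        return x * x

    with Pool(2) as pool:                  # a ThreadPool, not worker processes
        print(pool.map(square, range(5)))  # -> [0, 1, 4, 9, 16]

    p = Process(target=square, args=(3,))  # DummyProcess wraps threading.Thread
    p.start()
    p.join()
    print(p.exitcode)                      # -> 0 once the thread has finished
#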
# __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): if self._parent is not current_process(): raise RuntimeError( "Parent is {0!r} but current_process is {1!r}".format( self._parent, current_process())) self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/dummy/connection.py000066400000000000000000000030761455552142400266560ustar00rootroot00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
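#
# --- Editorial usage sketch (added by the editor; not part of the upstream
# module).  A small, hedged example of the queue-backed connection objects
# defined below; both ends of the "pipe" live in the same process, so this is
# only a drop-in stand-in for the socket-based API.  The helper name
# `_editor_example_dummy_pipe` and the import path `multiprocess.dummy.connection`
# are assumptions for illustration; nothing below runs at import time.
#
def _editor_example_dummy_pipe():
    from multiprocess.dummy.connection import Pipe

    left, right = Pipe()          # two Connection objects backed by Queues
    left.send({'msg': 'ping'})    # send/recv are just Queue.put / Queue.get
    print(right.recv())           # -> {'msg': 'ping'}
    print(right.poll(0.1))        # -> False, nothing left to read
#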
# __all__ = [ 'Client', 'Listener', 'Pipe' ] from queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None @property def address(self): return self._backlog_queue def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False with self._in.not_empty: self._in.not_empty.wait(timeout) return self._in.qsize() > 0 def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/forkserver.py000066400000000000000000000275421455552142400255600ustar00rootroot00000000000000import errno import os import selectors import signal import socket import struct import sys import threading import warnings from . import connection from . import process from .context import reduction from . import resource_tracker from . import spawn from . import util __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def _stop(self): # Method used by unit tests to stop the server with self._lock: self._stop_unlocked() def _stop_unlocked(self): if self._forkserver_pid is None: return # close the "alive" file descriptor asks the server to stop os.close(self._forkserver_alive_fd) self._forkserver_alive_fd = None os.waitpid(self._forkserver_pid, 0) self._forkserver_pid = None if not util.is_abstract_socket_namespace(self._forkserver_address): os.unlink(self._forkserver_address) self._forkserver_address = None def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in modules_names): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. 
''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, resource_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. ''' with self._lock: resource_tracker.ensure_running() if self._forkserver_pid is not None: # forkserver was launched before, is it still running? pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) if not pid: # still alive return # dead, launch it again os.close(self._forkserver_alive_fd) self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None cmd = ('from multiprocess.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = {x: y for x, y in data.items() if x in desired_keys} else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) if not util.is_abstract_socket_namespace(address): os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] pid = util.spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w self._forkserver_pid = pid # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass util._close_stdin() sig_r, sig_w = os.pipe() os.set_blocking(sig_r, False) os.set_blocking(sig_w, False) def sigchld_handler(*_unused): # Dummy signal handler, doesn't do anything pass handlers = { # unblocking SIGCHLD allows the wakeup fd to notify our event loop signal.SIGCHLD: sigchld_handler, # protect the process from ^C signal.SIGINT: signal.SIG_IGN, } old_handlers = {sig: signal.signal(sig, val) for (sig, val) in handlers.items()} # calling os.write() in the Python signal handler is racy signal.set_wakeup_fd(sig_w) # map child pids to client fds pid_to_fd = {} with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) selector.register(sig_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: 
# EOF because no more client processes left assert os.read(alive_r, 1) == b'', "Not at EOF?" raise SystemExit if sig_r in rfds: # Got SIGCHLD os.read(sig_r, 65536) # exhaust while True: # Scan for child processes try: pid, sts = os.waitpid(-1, os.WNOHANG) except ChildProcessError: break if pid == 0: break child_w = pid_to_fd.pop(pid, None) if child_w is not None: returncode = os.waitstatus_to_exitcode(sts) # Send exit code to client process try: write_signed(child_w, returncode) except BrokenPipeError: # client vanished pass os.close(child_w) else: # This shouldn't happen really warnings.warn('forkserver: waitpid returned ' 'unexpected pid %d' % pid) if listener in rfds: # Incoming fork request with listener.accept()[0] as s: # Receive fds from client fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) if len(fds) > MAXFDS_TO_SEND: raise RuntimeError( "Too many ({0:n}) fds to send".format( len(fds))) child_r, child_w, *fds = fds s.close() pid = os.fork() if pid == 0: # Child code = 1 try: listener.close() selector.close() unused_fds = [alive_r, child_w, sig_r, sig_w] unused_fds.extend(pid_to_fd.values()) code = _serve_one(child_r, fds, unused_fds, old_handlers) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) else: # Send pid to client process try: write_signed(child_w, pid) except BrokenPipeError: # client vanished pass pid_to_fd[pid] = child_w os.close(child_r) for fd in fds: os.close(fd) except OSError as e: if e.errno != errno.ECONNABORTED: raise def _serve_one(child_r, fds, unused_fds, handlers): # close unnecessary stuff and reset signal handlers signal.set_wakeup_fd(-1) for sig, val in handlers.items(): signal.signal(sig, val) for fd in unused_fds: os.close(fd) (_forkserver._forkserver_alive_fd, resource_tracker._resource_tracker._fd, *_forkserver._inherited_fds) = fds # Run process object received over pipe parent_sentinel = os.dup(child_r) code = spawn._main(child_r, parent_sentinel) return code # # Read and write signed numbers # def read_signed(fd): data = b'' length = SIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return SIGNED_STRUCT.unpack(data)[0] def write_signed(fd, n): msg = SIGNED_STRUCT.pack(n) while msg: nbytes = os.write(fd, msg) if nbytes == 0: raise RuntimeError('should not get here') msg = msg[nbytes:] # # # _forkserver = ForkServer() ensure_running = _forkserver.ensure_running get_inherited_fds = _forkserver.get_inherited_fds connect_to_new_process = _forkserver.connect_to_new_process set_forkserver_preload = _forkserver.set_forkserver_preload uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/heap.py000066400000000000000000000265521455552142400243050ustar00rootroot00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import bisect from collections import defaultdict import mmap import os import sys import tempfile import threading from .context import reduction, assert_spawning from . import util __all__ = ['BufferWrapper'] # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': import _winapi class Arena(object): """ A shared memory area backed by anonymous memory (Windows). 
""" _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if _winapi.GetLastError() == 0: break # We have reopened a preexisting mmap. buf.close() else: raise FileExistsError('Cannot find name for new mmap') self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state # Reopen existing mmap self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. See issue 23060 #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS else: class Arena(object): """ A shared memory area backed by a temporary file (POSIX). """ if sys.platform == 'linux': _dir_candidates = ['/dev/shm'] else: _dir_candidates = [] def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: # Arena is created anew (if fd != -1, it means we're coming # from rebuild_arena() below) self.fd, name = tempfile.mkstemp( prefix='pym-%d-'%os.getpid(), dir=self._choose_dir(size)) os.unlink(name) util.Finalize(self, os.close, (self.fd,)) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def _choose_dir(self, size): # Choose a non-storage backed directory if possible, # to improve performance for d in self._dir_candidates: st = os.statvfs(d) if st.f_bavail * st.f_frsize >= size: # enough free space? return d return util.get_temp_dir() def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because ' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): # Minimum malloc() alignment _alignment = 8 _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() # Current arena allocation size self._size = size # A sorted list of available block sizes in arenas self._lengths = [] # Free block management: # - map each block size to a list of `(Arena, start, stop)` blocks self._len_to_seq = {} # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block # starting at that offset self._start_to_block = {} # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block # ending at that offset self._stop_to_block = {} # Map arenas to their `(Arena, start, stop)` blocks in use self._allocated_blocks = defaultdict(set) self._arenas = [] # List of pending blocks to free - see comment in free() below self._pending_free_blocks = [] # Statistics self._n_mallocs = 0 self._n_frees = 0 @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _new_arena(self, size): # Create a new arena with at least the given *size* length = self._roundup(max(self._size, size), mmap.PAGESIZE) # We carve larger and larger arenas, for efficiency, until we # reach a large-ish size (roughly L3 cache-sized) if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) def 
_discard_arena(self, arena): # Possibly delete the given (unused) arena length = arena.size # Reusing an existing arena is faster than creating a new one, so # we only reclaim space if it's large enough. if length < self._DISCARD_FREE_SPACE_LARGER_THAN: return blocks = self._allocated_blocks.pop(arena) assert not blocks del self._start_to_block[(arena, 0)] del self._stop_to_block[(arena, length)] self._arenas.remove(arena) seq = self._len_to_seq[length] seq.remove((arena, 0, length)) if not seq: del self._len_to_seq[length] self._lengths.remove(length) def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): return self._new_arena(size) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _add_free_block(self, block): # make block available and try to merge with its neighbours in the arena (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _remove_allocated_block(self, block): arena, start, stop = block blocks = self._allocated_blocks[arena] blocks.remove((start, stop)) if not blocks: # Arena is entirely free, discard it from this process self._discard_arena(arena) def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held. while True: try: block = self._pending_free_blocks.pop() except IndexError: break self._add_free_block(block) self._remove_allocated_block(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
if os.getpid() != self._lastpid: raise ValueError( "My pid ({0:n}) is not last pid {1:n}".format( os.getpid(),self._lastpid)) if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._n_frees += 1 self._free_pending_blocks() self._add_free_block(block) self._remove_allocated_block(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._n_mallocs += 1 # allow pending blocks to be marked available self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) real_stop = start + size if real_stop < stop: # if the returned block is larger than necessary, mark # the remainder available self._add_free_block((arena, real_stop, stop)) self._allocated_blocks[arena].add((start, real_stop)) return (arena, start, real_stop) # # Class wrapping a block allocated out of a Heap -- can be inherited by child process # class BufferWrapper(object): _heap = Heap() def __init__(self, size): if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start+size] uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/managers.py000066400000000000000000001351771455552142400251710ustar00rootroot00000000000000# # Module providing manager classes for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] # # Imports # import sys import threading import signal import array import queue import time import types import os from os import getpid from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen, ProcessError from . import pool from . import process from . import util from . import get_context try: from . 
import shared_memory except ImportError: HAS_SHMEM = False else: HAS_SHMEM = True __all__.append('SharedMemoryManager') # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) del view_type, view_types # # Type for identifying shared objects # class Token(object): ''' Type to uniquely identify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): if not isinstance(result, str): raise TypeError( "Result {0!r} (kind '{1}') type is {2}, not str".format( result, kind, type(result))) if kind == '#UNSERIALIZABLE': return RemoteError('Unserializable message: %s\n' % result) else: return RemoteError(result) else: return ValueError('Unrecognized message type {!r}'.format(kind)) class RemoteError(Exception): def __str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): if not isinstance(authkey, bytes): raise TypeError( "Authkey {0!r} is type {1!s}, not bytes".format( authkey, type(authkey))) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if sys.stdout != sys.__stdout__: # what 
about stderr? util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def _handle_request(self, c): request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) def handle_request(self, conn): ''' Handle a new connection ''' try: self._handle_request(conn) except SystemExit: # Server.serve_client() calls sys.exit(0) on EOF pass finally: conn.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = \ self.id_to_local_proxy_obj[ident] except KeyError: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' # Perhaps include debug info about 'c'? 
with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' return len(self.id_to_refcount) def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(self, c, typeid, /, *args, **kwds): ''' Create a new shared object and return its id ''' with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: if kwds or (len(args) != 1): raise ValueError( "Without callable, must have one non-keyword argument") obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: if not isinstance(method_to_typeid, dict): raise TypeError( "Method_to_typeid {0!r}: type {1!s}, not dict".format( method_to_typeid, type(method_to_typeid))) exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: # If no external references exist but an internal (to the # manager) still does and a new external reference is created # from it, restore the manager's tracking of it from the # previously stashed internal ref. if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = \ self.id_to_local_proxy_obj[ident] obj, exposed, gettypeid = self.id_to_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if ident not in self.id_to_refcount and \ ident in self.id_to_local_proxy_obj: util.debug('Server DECREF skipping %r', ident) return with self.mutex: if self.id_to_refcount[ident] <= 0: raise AssertionError( "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( ident, self.id_to_obj[ident], self.id_to_refcount[ident])) self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: # Two-step process in case the object turns out to contain other # proxy objects (e.g. a managed list of managed lists). # Otherwise, deleting self.id_to_obj[ident] would trigger the # deleting of the stored value (another managed object) which would # in turn attempt to acquire the mutex that is already held here. 
self.id_to_obj[ident] = (None, (), None) # thread-safe util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { #XXX: register dill? 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None, *, shutdown_timeout=1.0): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() self._shutdown_timeout = shutdown_timeout def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client, self._shutdown_timeout), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' # bpo-36368: protect server process from KeyboardInterrupt signals signal.signal(signal.SIGINT, signal.SIG_IGN) if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address 
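        # Editorial note (comment added; not in the original source): this is
        # the child half of the address handshake -- BaseManager.start() in the
        # parent is blocked in reader.recv() waiting for exactly this value
        # before it records the address and registers the shutdown finalizer.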
writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, /, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() if self._state.value != State.STARTED: if self._state.value == State.INITIAL: raise ProcessError("Unable to start server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client, shutdown_timeout): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=shutdown_timeout) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=shutdown_timeout) if process.is_alive(): util.info('manager still alive after terminate') process.kill() process.join() state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass @property def address(self): return self._address @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): # isinstance? 
assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, /, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] # Should be set to True only when a proxy object is being created # on the manager server; primary use case: nested proxy objects. # RebuildProxy detects when a proxy is being created on the manager # and sets this value appropriately. 
self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... 
decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = \ server.id_to_obj[token.id] incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, /, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref, manager_owned=manager_owned) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = 
typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): _exposed_ = ('__next__', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self, n=1): return self._callmethod('notify', (n,)) def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) __class_getitem__ = 
classmethod(types.GenericAlias) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) DictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__' )) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocess.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) # # Definition of SharedMemoryManager and SharedMemoryServer # if HAS_SHMEM: class _SharedMemoryTracker: "Manages one or more shared memory segments." def __init__(self, name, segment_names=[]): self.shared_memory_context_name = name self.segment_names = segment_names def register_segment(self, segment_name): "Adds the supplied shared memory block name to tracker." 
util.debug(f"Register segment {segment_name!r} in pid {getpid()}") self.segment_names.append(segment_name) def destroy_segment(self, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the list of blocks being tracked.""" util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") self.segment_names.remove(segment_name) segment = shared_memory.SharedMemory(segment_name) segment.close() segment.unlink() def unlink(self): "Calls destroy_segment() on all tracked shared memory blocks." for segment_name in self.segment_names[:]: self.destroy_segment(segment_name) def __del__(self): util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") self.unlink() def __getstate__(self): return (self.shared_memory_context_name, self.segment_names) def __setstate__(self, state): self.__init__(*state) class SharedMemoryServer(Server): public = Server.public + \ ['track_segment', 'release_segment', 'list_segments'] def __init__(self, *args, **kwargs): Server.__init__(self, *args, **kwargs) address = self.address # The address of Linux abstract namespaces can be bytes if isinstance(address, bytes): address = os.fsdecode(address) self.shared_memory_context = \ _SharedMemoryTracker(f"shm_{address}_{getpid()}") util.debug(f"SharedMemoryServer started by pid {getpid()}") def create(self, c, typeid, /, *args, **kwargs): """Create a new distributed-shared object (not backed by a shared memory block) and return its id to be used in a Proxy Object.""" # Unless set up as a shared proxy, don't make shared_memory_context # a standard part of kwargs. This makes things easier for supplying # simple functions. if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): kwargs['shared_memory_context'] = self.shared_memory_context return Server.create(self, c, typeid, *args, **kwargs) def shutdown(self, c): "Call unlink() on all tracked shared memory, terminate the Server." self.shared_memory_context.unlink() return Server.shutdown(self, c) def track_segment(self, c, segment_name): "Adds the supplied shared memory block name to Server's tracker." self.shared_memory_context.register_segment(segment_name) def release_segment(self, c, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the tracker instance inside the Server.""" self.shared_memory_context.destroy_segment(segment_name) def list_segments(self, c): """Returns a list of names of shared memory blocks that the Server is currently tracking.""" return self.shared_memory_context.segment_names class SharedMemoryManager(BaseManager): """Like SyncManager but uses SharedMemoryServer instead of Server. It provides methods for creating and returning SharedMemory instances and for creating a list-like object (ShareableList) backed by shared memory. It also provides methods that create and return Proxy Objects that support synchronization across processes (i.e. multi-process-safe locks and semaphores). """ _Server = SharedMemoryServer def __init__(self, *args, **kwargs): if os.name == "posix": # bpo-36867: Ensure the resource_tracker is running before # launching the manager process, so that concurrent # shared_memory manipulation both in the manager and in the # current process does not create two resource_tracker # processes. from . 
import resource_tracker resource_tracker.ensure_running() BaseManager.__init__(self, *args, **kwargs) util.debug(f"{self.__class__.__name__} created by pid {getpid()}") def __del__(self): util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") def get_server(self): 'Better than monkeypatching for now; merge into Server ultimately' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started SharedMemoryServer") elif self._state.value == State.SHUTDOWN: raise ProcessError("SharedMemoryManager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self._Server(self._registry, self._address, self._authkey, self._serializer) def SharedMemory(self, size): """Returns a new SharedMemory instance with the specified size in bytes, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sms = shared_memory.SharedMemory(None, create=True, size=size) try: dispatch(conn, None, 'track_segment', (sms.name,)) except BaseException as e: sms.unlink() raise e return sms def ShareableList(self, sequence): """Returns a new ShareableList instance populated with the values from the input sequence, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sl = shared_memory.ShareableList(sequence) try: dispatch(conn, None, 'track_segment', (sl.shm.name,)) except BaseException as e: sl.shm.unlink() raise e return sl uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/pool.py000066400000000000000000000777671455552142400243570ustar00rootroot00000000000000# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import collections import itertools import os import queue import threading import time import traceback import types import warnings # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError from .connection import wait # # Constants representing the state of a pool # INIT = "INIT" RUN = "RUN" CLOSE = "CLOSE" TERMINATE = "TERMINATE" # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): if (maxtasks is not None) and not (isinstance(maxtasks, int) and maxtasks >= 1): raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception and func is not _helper_reraises_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 util.debug('worker exiting after %d tasks' % completed) def _helper_reraises_exception(ex): 'Pickle-able helper function for use by _guarded_task_generation.' raise ex # # Class representing a process pool # class _PoolCache(dict): """ Class that implements a cache for the Pool class that will notify the pool management threads every time the cache is emptied. The notification is done by the use of a queue that is provided when instantiating the cache. """ def __init__(self, /, *args, notifier=None, **kwds): self.notifier = notifier super().__init__(*args, **kwds) def __delitem__(self, item): super().__delitem__(item) # Notify that the cache is empty. This is important because the # pool keeps maintaining workers until the cache gets drained. This # eliminates a race condition in which a task is finished after the # the pool's _handle_workers method has enter another iteration of the # loop. In this situation, the only event that can wake up the pool # is the cache to be emptied (no more tasks available). if not self: self.notifier.put(None) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' _wrap_exception = True @staticmethod def Process(ctx, *args, **kwds): return ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): # Attributes initialized early to make sure that they exist in # __del__() if __init__() raises an exception self._pool = [] self._state = INIT self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.SimpleQueue() # The _change_notifier queue exist to wake up self._handle_workers() # when the cache (self._cache) is empty or when there is a change in # the _state variable of the thread that runs _handle_workers. 
self._change_notifier = self._ctx.SimpleQueue() self._cache = _PoolCache(notifier=self._change_notifier) self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if maxtasksperchild is not None: if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: raise ValueError("maxtasksperchild must be a positive int or None") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes try: self._repopulate_pool() except Exception: for p in self._pool: if p.exitcode is None: p.terminate() for p in self._pool: p.join() raise sentinels = self._get_sentinels() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self._cache, self._taskqueue, self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception, sentinels, self._change_notifier) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._change_notifier, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) self._state = RUN # Copy globals as function locals to make sure that they are available # during Python shutdown when the Pool is destroyed. def __del__(self, _warn=warnings.warn, RUN=RUN): if self._state == RUN: _warn(f"unclosed running multiprocessing pool {self!r}", ResourceWarning, source=self) if getattr(self, '_change_notifier', None) is not None: self._change_notifier.put(None) def __repr__(self): cls = self.__class__ return (f'<{cls.__module__}.{cls.__qualname__} ' f'state={self._state} ' f'pool_size={len(self._pool)}>') def _get_sentinels(self): task_queue_sentinels = [self._outqueue._reader] self_notifier_sentinels = [self._change_notifier._reader] return [*task_queue_sentinels, *self_notifier_sentinels] @staticmethod def _get_worker_sentinels(workers): return [worker.sentinel for worker in workers if hasattr(worker, "sentinel")] @staticmethod def _join_exited_workers(pool): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. 
""" cleaned = False for i in reversed(range(len(pool))): worker = pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del pool[i] return cleaned def _repopulate_pool(self): return self._repopulate_pool_static(self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) @staticmethod def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(processes - len(pool)): w = Process(ctx, target=worker, args=(inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() pool.append(w) util.debug('added worker') @staticmethod def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Clean up any exited workers and start replacements for them. """ if Pool._join_exited_workers(pool): Pool._repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _check_running(self): if self._state != RUN: raise ValueError("Pool not running") def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. Pool must be running. ''' return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def _guarded_task_generation(self, result_job, func, iterable): '''Provides a generator of tasks for imap and imap_unordered with appropriate handling for iterables which throw exceptions during iteration.''' try: i = -1 for i, x in enumerate(iterable): yield (result_job, i, func, (x,), {}) except Exception as e: yield (result_job, i+1, _helper_reraises_exception, (e,), {}) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. 
''' self._check_running() if chunksize == 1: result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0:n}".format( chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. ''' self._check_running() if chunksize == 1: result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0!r}".format(chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' self._check_running() result = ApplyResult(self, callback, error_callback) self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' self._check_running() if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapper, task_batches), None ) ) return result @staticmethod def _wait_for_updates(sentinels, change_notifier, timeout=None): wait(sentinels, timeout=timeout) while not change_notifier.empty(): change_notifier.get() @classmethod def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception, sentinels, change_notifier): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
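        # Editorial note (comment added; not in the original source): the loop
        # condition below keeps the handler alive while the pool is RUN, or
        # while results are still outstanding in the cache after close();
        # terminate() sets TERMINATE and exits the loop regardless, after which
        # the None sentinel queued below stops the task handler.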
while thread._state == RUN or (cache and thread._state != TERMINATE): cls._maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] cls._wait_for_updates(current_sentinels, change_notifier) # send sentinel to stop workers taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None try: # iterating taskseq cannot fail for task in taskseq: if thread._state != RUN: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, idx = task[:2] try: cache[job]._set(idx, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') idx = task[1] if task else -1 set_length(idx + 1) continue break finally: task = taskseq = job = None else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state != RUN: assert thread._state == TERMINATE, "Thread not in TERMINATE" util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
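            # Editorial note (comment added; not in the original source): up to
            # 10 queued results are drained here as a safety margin, so that the
            # at-most-two pending sentinels can be put without blocking on a
            # full outqueue pipe.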
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE self._change_notifier.put(None) def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') if self._state == RUN: raise ValueError("Pool is still running") elif self._state not in (CLOSE, TERMINATE): raise ValueError("In unknown state") self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') # Notify that the worker_handler state has been changed so the # _handle_workers loop can be unblocked (and exited) in order to # send the finalization sentinel all the workers. worker_handler._state = TERMINATE change_notifier.put(None) task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) if (not result_handler.is_alive()) and (len(cache) != 0): raise AssertionError( "Cannot have cache with result_hander not alive") result_handler._state = TERMINATE change_notifier.put(None) outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
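        # Editorial note (comment added; not in the original source): the
        # hasattr() guard below keeps this path harmless for ThreadPool, whose
        # dummy worker objects are threads and have no terminate(); real worker
        # processes are only terminated if their exitcode is still None, i.e.
        # they have not already exited.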
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): self._check_running() return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, pool, callback, error_callback): self._pool = pool self._event = threading.Event() self._job = next(job_counter) self._cache = pool._cache self._callback = callback self._error_callback = error_callback self._cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): if not self.ready(): raise ValueError("{0!r} not ready".format(self)) return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] self._pool = None __class_getitem__ = classmethod(types.GenericAlias) AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, pool, chunksize, length, callback, error_callback): ApplyResult.__init__(self, pool, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del self._cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): self._number_left -= 1 success, result = success_result if success and self._success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() self._pool = None else: if not success and self._success: # only store first exception self._success = False self._value = result if self._number_left == 0: # only consider the result ready once all jobs are done if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() self._pool = None # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, pool): self._pool = pool self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = pool._cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} self._cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise 
StopIteration from None raise TimeoutError from None success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] self._pool = None def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] self._pool = None # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] self._pool = None # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(ctx, *args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.SimpleQueue() self._outqueue = queue.SimpleQueue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _get_sentinels(self): return [self._change_notifier._reader] @staticmethod def _get_worker_sentinels(workers): return [] @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # drain inqueue, and put sentinels at its head to make workers finish try: while True: inqueue.get(block=False) except queue.Empty: pass for i in range(size): inqueue.put(None) def _wait_for_updates(self, sentinels, change_notifier, timeout): time.sleep(timeout) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/popen_fork.py000066400000000000000000000045061455552142400255250ustar00rootroot00000000000000import os import signal from . import util __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' def __init__(self, process_obj): util._flush_std_streams() self.returncode = None self.finalizer = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except OSError: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: self.returncode = os.waitstatus_to_exitcode(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from multiprocess.connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
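            # Editorial note (comment added; not in the original source):
            # timeout == 0.0 maps to a non-blocking WNOHANG check; any other
            # case falls through to a blocking waitpid(), which is safe because
            # either timeout was None or the sentinel wait above has already
            # reported that the child exited.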
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def _send_signal(self, sig): if self.returncode is None: try: os.kill(self.pid, sig) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def terminate(self): self._send_signal(signal.SIGTERM) def kill(self): self._send_signal(signal.SIGKILL) def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) os.close(parent_w) code = process_obj._bootstrap(parent_sentinel=child_r) finally: os._exit(code) else: os.close(child_w) os.close(child_r) self.finalizer = util.Finalize(self, util.close_fds, (parent_r, parent_w,)) self.sentinel = parent_r def close(self): if self.finalizer is not None: self.finalizer() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/popen_forkserver.py000066400000000000000000000042631455552142400267540ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen if not reduction.HAVE_SEND_HANDLE: raise ImportError('No support for sending fds between processes') from . import forkserver from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) # Keep a duplicate of the data pipe's write end as a sentinel of the # parent process used by the child process. _parent_w = os.dup(w) self.finalizer = util.Finalize(self, util.close_fds, (_parent_w, self.sentinel)) with open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_signed(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from multiprocess.connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_signed(self.sentinel) except (OSError, EOFError): # This should not happen usually, but perhaps the forkserver # process itself got killed self.returncode = 255 return self.returncode uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/popen_spawn_posix.py000066400000000000000000000037551455552142400271430ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): from . 
import resource_tracker tracker_fd = resource_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds) self.sentinel = parent_r with open(parent_w, 'wb', closefd=False) as f: f.write(fp.getbuffer()) finally: fds_to_close = [] for fd in (parent_r, parent_w): if fd is not None: fds_to_close.append(fd) self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) for fd in (child_r, child_w): if fd is not None: os.close(fd) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/popen_spawn_win32.py000066400000000000000000000104011455552142400267250ustar00rootroot00000000000000import os import msvcrt import signal import sys import _winapi from .context import reduction, get_spawning_popen, set_spawning_popen from . import spawn from . import util __all__ = ['Popen'] # # # # Exit code used by Popen.terminate() TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") def _path_eq(p1, p2): return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) WINENV = not _path_eq(sys.executable, sys._base_executable) def _close_handles(*handles): for handle in handles: _winapi.CloseHandle(handle) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' def __init__(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be duplicated by the child process # -- see spawn_main() in spawn.py. # # bpo-33929: Previously, the read end of pipe was "stolen" by the child # process, but it leaked a handle if the child process had been # terminated before it could steal the handle from the parent process. rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) python_exe = spawn.get_executable() # bpo-35797: When running in a venv, we bypass the redirect # executor and launch our base Python. 
if WINENV and _path_eq(python_exe, sys.executable): cmd[0] = python_exe = sys._base_executable env = os.environ.copy() env["__PYVENV_LAUNCHER__"] = sys.executable else: env = None cmd = ' '.join('"%s"' % x for x in cmd) with open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = _winapi.CreateProcess( python_exe, cmd, None, None, False, 0, env, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) self.finalizer = util.Finalize(self, _close_handles, (self.sentinel, int(rhandle))) # send information to child set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: set_spawning_popen(None) def duplicate_for_child(self, handle): assert self is get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except PermissionError: # ERROR_ACCESS_DENIED (winerror 5) is received when the # process already died. code = _winapi.GetExitCodeProcess(int(self._handle)) if code == _winapi.STILL_ACTIVE: raise self.returncode = code else: self.returncode = -signal.SIGTERM kill = terminate def close(self): self.finalizer() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/process.py000066400000000000000000000275451455552142400250510ustar00rootroot00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
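#
# Example (illustrative only, not part of the original module): a minimal,
# hedged sketch of how the Process class defined here is normally used via
# the public package API. The helper name `work` is an assumption.
#
#   from multiprocess import Process
#
#   def work(x):
#       print('working on', x)
#
#   if __name__ == '__main__':
#       p = Process(target=work, args=(42,))
#       p.start()          # spawns/forks the child via a platform Popen class
#       p.join()           # waits for the child; exitcode is then available
#       print(p.exitcode)
#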
# __all__ = ['BaseProcess', 'current_process', 'active_children', 'parent_process'] # # Imports # import os import sys import signal import itertools import threading from _weakrefset import WeakSet # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_children) def parent_process(): ''' Return process object representing the parent process ''' return _parent_process # # # def _cleanup(): # check for processes which have finished for p in list(_children): if (child_popen := p._popen) and child_popen.poll() is not None: _children.discard(p) # # The `Process` class # class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analogous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count,) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._parent_name = _current_process.name self._popen = None self._closed = False self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) if daemon is not None: self.daemon = daemon _dangling.add(self) def _check_closed(self): if self._closed: raise ValueError("process object is closed") def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' self._check_closed() assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._check_closed() self._popen.terminate() def kill(self): ''' Terminate process; sends SIGKILL signal or uses TerminateProcess() ''' self._check_closed() self._popen.kill() def join(self, timeout=None): ''' Wait until child process terminates ''' self._check_closed() assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) def is_alive(self): ''' Return whether process is alive ''' self._check_closed() if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _children.discard(self) return False def close(self): ''' Close the Process object. This method releases resources held by the Process object. 
It is an error to call this method if the child process is still running. ''' if self._popen is not None: if self._popen.poll() is None: raise ValueError("Cannot close a process while it is still running. " "You should first call join() or terminate().") self._popen.close() self._popen = None del self._sentinel _children.discard(self) self._closed = True @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, str), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' self._check_closed() if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' self._check_closed() if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None def __repr__(self): exitcode = None if self is _current_process: status = 'started' elif self._closed: status = 'closed' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: exitcode = self._popen.poll() if exitcode is not None: status = 'stopped' else: status = 'started' info = [type(self).__name__, 'name=%r' % self._name] if self._popen is not None: info.append('pid=%s' % self._popen.pid) info.append('parent=%s' % self._parent_pid) info.append(status) if exitcode is not None: exitcode = _exitcode_to_name.get(exitcode, exitcode) info.append('exitcode=%s' % exitcode) if self.daemon: info.append('daemon') return '<%s>' % ' '.join(info) ## def _bootstrap(self, parent_sentinel=None): from . 
import util, context global _current_process, _parent_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() util._close_stdin() old_process = _current_process _current_process = self _parent_process = _ParentProcess( self._parent_name, self._parent_pid, parent_sentinel) if threading._HAVE_THREAD_NATIVE_ID: threading.main_thread()._set_native_id() try: self._after_fork() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as e: if e.code is None: exitcode = 0 elif isinstance(e.code, int): exitcode = e.code else: sys.stderr.write(str(e.code) + '\n') exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: threading._shutdown() util.info('process exiting with exitcode %d' % exitcode) util._flush_std_streams() return exitcode @staticmethod def _after_fork(): from . import util util._finalizer_registry.clear() util._run_after_forkers() # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the parent process # class _ParentProcess(BaseProcess): def __init__(self, name, pid, sentinel): self._identity = () self._name = name self._pid = pid self._parent_pid = None self._popen = None self._closed = False self._sentinel = sentinel self._config = {} def is_alive(self): from multiprocess.connection import wait return not wait([self._sentinel], timeout=0) @property def ident(self): return self._pid def join(self, timeout=None): ''' Wait until parent process terminates ''' from multiprocess.connection import wait wait([self._sentinel], timeout=timeout) pid = ident # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._closed = False self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} # Note that some versions of FreeBSD only allow named # semaphores to have names of up to 14 characters. Therefore # we choose a short prefix. # # On MacOSX in a sandbox it may be necessary to use a # different prefix -- see #19478. # # Everything in self._config will be inherited by descendant # processes. def close(self): pass _parent_process = None _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in list(signal.__dict__.items()): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = f'-{name}' del name, signum # For debug and leak testing _dangling = WeakSet() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/queues.py000066400000000000000000000275531455552142400247010ustar00rootroot00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
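#
# Example (illustrative only, not part of the original module): a hedged
# sketch of the pipe-and-feeder-thread Queue defined below being shared
# between two processes. The helper name `producer` is an assumption.
#
#   from multiprocess import Process, Queue
#
#   def producer(q):
#       q.put('hello')   # pickled by the feeder thread and sent over the pipe
#
#   if __name__ == '__main__':
#       q = Queue()
#       p = Process(target=producer, args=(q,))
#       p.start()
#       print(q.get())   # blocks until an item arrives, then unpickles it
#       p.join()
#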
# __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import types import weakref import errno from queue import Empty, Full try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import connection from . import context _ForkingPickler = context.reduction.ForkingPickler from .util import debug, info, Finalize, register_after_fork, is_exiting # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0, *, ctx): if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._reset() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._reset() def _after_fork(self): debug('Queue._after_fork()') self._reset(after_fork=True) def _reset(self, after_fork=False): if after_fork: self._notempty._at_fork_reinit() else: self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = getattr(time,'monotonic',time.time)() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - getattr(time,'monotonic',time.time)() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed, "Queue {0!r} not closed".format(self) if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except 
AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._reader.close, self._writer.close, self._ignore_epipe, self._on_queue_feeder_error, self._sem), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, reader_close, writer_close, ignore_epipe, onerror, queue_sem): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') reader_close() writer_close() return # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as e: if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: # Since the object has not been sent in the queue, we need # to decrease the size of the queue. The error acts as # if the object had been silently removed from the queue # and this step is necessary to have a properly working # queue. queue_sem.release() onerror(e, obj) @staticmethod def _on_queue_feeder_error(e, obj): """ Private API hook called when feeding data in the background thread raises an exception. For overriding by concurrent.futures. """ import traceback traceback.print_exc() _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. 
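#
# Example (illustrative only, not part of the original module): a hedged
# sketch of the intended task_done()/join() pairing described above. The
# helper name `worker` is an assumption.
#
#   from multiprocess import Process, JoinableQueue
#
#   def worker(q):
#       while True:
#           item = q.get()
#           try:
#               pass          # ... process item ...
#           finally:
#               q.task_done() # exactly one task_done() per completed get()
#
#   if __name__ == '__main__':
#       q = JoinableQueue()
#       Process(target=worker, args=(q,), daemon=True).start()
#       for i in range(10):
#           q.put(i)
#       q.join()              # returns once every queued item is marked done
#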
# class JoinableQueue(Queue): def __init__(self, maxsize=0, *, ctx): Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty, self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self, *, ctx): self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() def close(self): self._reader.close() self._writer.close() def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._poll = self._reader.poll def get(self): with self._rlock: res = self._reader.recv_bytes() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def put(self, obj): # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(obj) else: with self._wlock: self._writer.send_bytes(obj) __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/reduction.py000066400000000000000000000226451455552142400253630ustar00rootroot00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from abc import ABCMeta import copyreg import functools import io import os try: import dill as pickle except ImportError: import pickle import socket import sys from . 
import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocess.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None, *args, **kwds): buf = io.BytesIO() cls(buf, protocol, *args, **kwds).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None, *args, **kwds): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol, *args, **kwds).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False, *, source_process=None): '''Duplicate a handle. (target_process is a handle not a pid!)''' current_process = _winapi.GetCurrentProcess() if source_process is None: source_process = current_process if target_process is None: target_process = current_process return _winapi.DuplicateHandle( source_process, handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
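# Duplicating with DUPLICATE_CLOSE_SOURCE below transfers ownership: the
# handle is closed in the originating process as part of being duplicated
# into this one.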
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) if len(a) % 256 != msg[0]: raise AssertionError( "Len is {0:n} but msg[0] is {1!r}".format( len(a), msg[0])) return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) class AbstractReducer(metaclass=ABCMeta): '''Abstract base class for use in implementing a Reduction class suitable for use in replacing the standard reduction mechanism used in multiprocess.''' ForkingPickler = ForkingPickler register = register dump = dump send_handle = send_handle recv_handle = recv_handle if sys.platform == 'win32': steal_handle = steal_handle duplicate = duplicate DupHandle = DupHandle else: sendfds = sendfds recvfds = recvfds DupFd = DupFd _reduce_method = _reduce_method _reduce_method_descriptor = _reduce_method_descriptor _rebuild_partial = _rebuild_partial _reduce_socket = _reduce_socket _rebuild_socket = _rebuild_socket def __init__(self, *args): register(type(_C().f), _reduce_method) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) register(functools.partial, _reduce_partial) register(socket.socket, _reduce_socket) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/resource_sharer.py000066400000000000000000000120141455552142400265470ustar00rootroot00000000000000# # We use a background thread for sharing fds on Unix, and for sharing sockets on # Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. # import os import signal import socket import sys import threading from . import process from .context import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. 
This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resources using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() self._lock._at_fork_reinit() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None, "Already have Listener" util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/resource_tracker.py000066400000000000000000000243161455552142400267260ustar00rootroot00000000000000############################################################################### # Server process to keep track of unlinked resources (like shared memory # segments, semaphores etc.) and clean them. # # On Unix we run a server process which keeps track of unlinked # resources. The server ignores SIGINT and SIGTERM and reads from a # pipe. 
Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining resource names. # # This is important because there may be system limits for such resources: for # instance, the system only supports a limited number of named semaphores, and # shared-memory segments live in the RAM. If a python process leaks such a # resource, this resource will not be removed till the next reboot. Without # this resource tracker process, "killall python" would probably leave unlinked # resources. import os import signal import sys import threading import warnings from . import spawn from . import util __all__ = ['ensure_running', 'register', 'unregister'] _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) _CLEANUP_FUNCS = { 'noop': lambda: None, } if os.name == 'posix': try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import _posixshmem # Use sem_unlink() to clean up named semaphores. # # sem_unlink() may be missing if the Python build process detected the # absence of POSIX named semaphores. In that case, no named semaphores were # ever opened, so no cleanup would be necessary. if hasattr(_multiprocessing, 'sem_unlink'): _CLEANUP_FUNCS.update({ 'semaphore': _multiprocessing.sem_unlink, }) _CLEANUP_FUNCS.update({ 'shared_memory': _posixshmem.shm_unlink, }) class ReentrantCallError(RuntimeError): pass class ResourceTracker(object): def __init__(self): self._lock = threading.RLock() self._fd = None self._pid = None def _reentrant_call_error(self): # gh-109629: this happens if an explicit call to the ResourceTracker # gets interrupted by a garbage collection, invoking a finalizer (*) # that itself calls back into ResourceTracker. # (*) for example the SemLock finalizer raise ReentrantCallError( "Reentrant call into the multiprocess resource tracker") def _stop(self): with self._lock: # This should not happen (_stop() isn't called by a finalizer) # but we check for it anyway. if getattr(self._lock, "_recursion_count", int)() > 1: return self._reentrant_call_error() if self._fd is None: # not running return # closing the "alive" file descriptor stops main() os.close(self._fd) self._fd = None os.waitpid(self._pid, 0) self._pid = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that resource tracker process is running. This can be run from any process. Usually a child process will use the resource created by its parent.''' with self._lock: if getattr(self._lock, "_recursion_count", int)() > 1: # The code below is certainly not reentrant-safe, so bail out return self._reentrant_call_error() if self._fd is not None: # resource tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) # Clean-up to avoid dangling processes. try: # _pid can be None if this process is a child from another # python process, which has started the resource_tracker. if self._pid is not None: os.waitpid(self._pid, 0) except ChildProcessError: # The resource_tracker has already been terminated. pass self._fd = None self._pid = None warnings.warn('resource_tracker: process died unexpectedly, ' 'relaunching. 
Some resources might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from multiprocess.resource_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] # bpo-33613: Register a signal mask that will block the signals. # This signal mask will be inherited by the child that is going # to be spawned and will protect the child from a race condition # that can make the child die before it registers signal handlers # for SIGINT and SIGTERM. The mask is unregistered after spawning # the child. try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = util.spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r) def _check_alive(self): '''Check that the pipe has not been closed by sending a probe.''' try: # We cannot use send here as it calls ensure_running, creating # a cycle. os.write(self._fd, b'PROBE:0:noop\n') except OSError: return False else: return True def register(self, name, rtype): '''Register name of resource with resource tracker.''' self._send('REGISTER', name, rtype) def unregister(self, name, rtype): '''Unregister name of resource with resource tracker.''' self._send('UNREGISTER', name, rtype) def _send(self, cmd, name, rtype): try: self.ensure_running() except ReentrantCallError: # The code below might or might not work, depending on whether # the resource tracker was already running and still alive. # Better warn the user. # (XXX is warnings.warn itself reentrant-safe? :-) warnings.warn( f"ResourceTracker called reentrantly for resource cleanup, " f"which is unsupported. 
" f"The {rtype} object {name!r} might leak.") msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') if len(msg) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('msg too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( nbytes, len(msg)) _resource_tracker = ResourceTracker() ensure_running = _resource_tracker.ensure_running register = _resource_tracker.register unregister = _resource_tracker.unregister getfd = _resource_tracker.getfd def main(fd): '''Run resource tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} try: # keep track of registered/unregistered resources with open(fd, 'rb') as f: for line in f: try: cmd, name, rtype = line.strip().decode('ascii').split(':') cleanup_func = _CLEANUP_FUNCS.get(rtype, None) if cleanup_func is None: raise ValueError( f'Cannot register {name} for automatic cleanup: ' f'unknown resource type {rtype}') if cmd == 'REGISTER': cache[rtype].add(name) elif cmd == 'UNREGISTER': cache[rtype].remove(name) elif cmd == 'PROBE': pass else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining resources for rtype, rtype_cache in cache.items(): if rtype_cache: try: warnings.warn('resource_tracker: There appear to be %d ' 'leaked %s objects to clean up at shutdown' % (len(rtype_cache), rtype)) except Exception: pass for name in rtype_cache: # For some reason the process which created and registered this # resource has failed to unregister it. Presumably it has # died. We therefore unlink it. try: try: _CLEANUP_FUNCS[rtype](name) except Exception as e: warnings.warn('resource_tracker: %r: %s' % (name, e)) finally: pass uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/shared_memory.py000066400000000000000000000440321455552142400262170ustar00rootroot00000000000000"""Provides shared memory for direct access across processes. The API of this package is currently provisional. Refer to the documentation for details. """ __all__ = [ 'SharedMemory', 'ShareableList' ] from functools import partial import mmap import os import errno import struct import secrets import types if os.name == "nt": import _winapi _USE_POSIX = False else: import _posixshmem _USE_POSIX = True from . import resource_tracker _O_CREX = os.O_CREAT | os.O_EXCL # FreeBSD (and perhaps other BSDs) limit names to 14 characters. _SHM_SAFE_NAME_LENGTH = 14 # Shared memory block name prefix if _USE_POSIX: _SHM_NAME_PREFIX = '/psm_' else: _SHM_NAME_PREFIX = 'wnsm_' def _make_filename(): "Create a random filename for the shared memory object." # number of random bytes to use for name nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 assert nbytes >= 2, '_SHM_NAME_PREFIX too long' name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) assert len(name) <= _SHM_SAFE_NAME_LENGTH return name class SharedMemory: """Creates a new shared memory block or attaches to an existing shared memory block. Every shared memory block is assigned a unique name. 
This enables one process to create a shared memory block with a particular name so that a different process can attach to that same shared memory block using that same name. As a resource for sharing data across processes, shared memory blocks may outlive the original process that created them. When one process no longer needs access to a shared memory block that might still be needed by other processes, the close() method should be called. When a shared memory block is no longer needed by any process, the unlink() method should be called to ensure proper cleanup.""" # Defaults; enables close() and unlink() to run without errors. _name = None _fd = -1 _mmap = None _buf = None _flags = os.O_RDWR _mode = 0o600 _prepend_leading_slash = True if _USE_POSIX else False def __init__(self, name=None, create=False, size=0): if not size >= 0: raise ValueError("'size' must be a positive integer") if create: self._flags = _O_CREX | os.O_RDWR if size == 0: raise ValueError("'size' must be a positive number different from zero") if name is None and not self._flags & os.O_EXCL: raise ValueError("'name' can only be None if create=True") if _USE_POSIX: # POSIX Shared Memory if name is None: while True: name = _make_filename() try: self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) except FileExistsError: continue self._name = name break else: name = "/" + name if self._prepend_leading_slash else name self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) self._name = name try: if create and size: os.ftruncate(self._fd, size) stats = os.fstat(self._fd) size = stats.st_size self._mmap = mmap.mmap(self._fd, size) except OSError: self.unlink() raise resource_tracker.register(self._name, "shared_memory") else: # Windows Named Shared Memory if create: while True: temp_name = _make_filename() if name is None else name # Create and reserve shared memory block with this name # until it can be attached to by mmap. h_map = _winapi.CreateFileMapping( _winapi.INVALID_HANDLE_VALUE, _winapi.NULL, _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF, size & 0xFFFFFFFF, temp_name ) try: last_error_code = _winapi.GetLastError() if last_error_code == _winapi.ERROR_ALREADY_EXISTS: if name is not None: raise FileExistsError( errno.EEXIST, os.strerror(errno.EEXIST), name, _winapi.ERROR_ALREADY_EXISTS ) else: continue self._mmap = mmap.mmap(-1, size, tagname=temp_name) finally: _winapi.CloseHandle(h_map) self._name = temp_name break else: self._name = name # Dynamically determine the existing named shared memory # block's size which is likely a multiple of mmap.PAGESIZE. h_map = _winapi.OpenFileMapping( _winapi.FILE_MAP_READ, False, name ) try: p_buf = _winapi.MapViewOfFile( h_map, _winapi.FILE_MAP_READ, 0, 0, 0 ) finally: _winapi.CloseHandle(h_map) try: size = _winapi.VirtualQuerySize(p_buf) finally: _winapi.UnmapViewOfFile(p_buf) self._mmap = mmap.mmap(-1, size, tagname=name) self._size = size self._buf = memoryview(self._mmap) def __del__(self): try: self.close() except OSError: pass def __reduce__(self): return ( self.__class__, ( self.name, False, self.size, ), ) def __repr__(self): return f'{self.__class__.__name__}({self.name!r}, size={self.size})' @property def buf(self): "A memoryview of contents of the shared memory block." return self._buf @property def name(self): "Unique name that identifies the shared memory block." 
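# On POSIX the name is stored internally with a leading '/', which is
# stripped here so callers see the same name they passed in.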
reported_name = self._name if _USE_POSIX and self._prepend_leading_slash: if self._name.startswith("/"): reported_name = self._name[1:] return reported_name @property def size(self): "Size in bytes." return self._size def close(self): """Closes access to the shared memory from this instance but does not destroy the shared memory block.""" if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() self._mmap = None if _USE_POSIX and self._fd >= 0: os.close(self._fd) self._fd = -1 def unlink(self): """Requests that the underlying shared memory block be destroyed. In order to ensure proper cleanup of resources, unlink should be called once (and only once) across all processes which have access to the shared memory block.""" if _USE_POSIX and self._name: _posixshmem.shm_unlink(self._name) resource_tracker.unregister(self._name, "shared_memory") _encoding = "utf8" class ShareableList: """Pattern for a mutable list-like object shareable via a shared memory block. It differs from the built-in list type in that these lists can not change their overall length (i.e. no append, insert, etc.) Because values are packed into a memoryview as bytes, the struct packing format for any storable value must require no more than 8 characters to describe its format.""" # The shared memory area is organized as follows: # - 8 bytes: number of items (N) as a 64-bit integer # - (N + 1) * 8 bytes: offsets of each element from the start of the # data area # - K bytes: the data area storing item values (with encoding and size # depending on their respective types) # - N * 8 bytes: `struct` format string for each element # - N bytes: index into _back_transforms_mapping for each element # (for reconstructing the corresponding Python value) _types_mapping = { int: "q", float: "d", bool: "xxxxxxx?", str: "%ds", bytes: "%ds", None.__class__: "xxxxxx?x", } _alignment = 8 _back_transforms_mapping = { 0: lambda value: value, # int, float, bool 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str 2: lambda value: value.rstrip(b'\x00'), # bytes 3: lambda _value: None, # None } @staticmethod def _extract_recreation_code(value): """Used in concert with _back_transforms_mapping to convert values into the appropriate Python objects when retrieving them from the list as well as when storing them.""" if not isinstance(value, (str, bytes, None.__class__)): return 0 elif isinstance(value, str): return 1 elif isinstance(value, bytes): return 2 else: return 3 # NoneType def __init__(self, sequence=None, *, name=None): if name is None or sequence is not None: sequence = sequence or () _formats = [ self._types_mapping[type(item)] if not isinstance(item, (str, bytes)) else self._types_mapping[type(item)] % ( self._alignment * (len(item) // self._alignment + 1), ) for item in sequence ] self._list_len = len(_formats) assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len offset = 0 # The offsets of each list element into the shared memory's # data area (0 meaning the start of the data area, not the start # of the shared memory area). 
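# For example, the sequence (1, 'ab') yields formats ['q', '8s'] and
# allocated offsets [0, 8, 16] with the 8-byte alignment used here.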
self._allocated_offsets = [0] for fmt in _formats: offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) self._allocated_offsets.append(offset) _recreation_codes = [ self._extract_recreation_code(item) for item in sequence ] requested_size = struct.calcsize( "q" + self._format_size_metainfo + "".join(_formats) + self._format_packing_metainfo + self._format_back_transform_codes ) self.shm = SharedMemory(name, create=True, size=requested_size) else: self.shm = SharedMemory(name) if sequence is not None: _enc = _encoding struct.pack_into( "q" + self._format_size_metainfo, self.shm.buf, 0, self._list_len, *(self._allocated_offsets) ) struct.pack_into( "".join(_formats), self.shm.buf, self._offset_data_start, *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) ) struct.pack_into( self._format_packing_metainfo, self.shm.buf, self._offset_packing_formats, *(v.encode(_enc) for v in _formats) ) struct.pack_into( self._format_back_transform_codes, self.shm.buf, self._offset_back_transform_codes, *(_recreation_codes) ) else: self._list_len = len(self) # Obtains size from offset 0 in buffer. self._allocated_offsets = list( struct.unpack_from( self._format_size_metainfo, self.shm.buf, 1 * 8 ) ) def _get_packing_format(self, position): "Gets the packing format for a single value stored in the list." position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") v = struct.unpack_from( "8s", self.shm.buf, self._offset_packing_formats + position * 8 )[0] fmt = v.rstrip(b'\x00') fmt_as_str = fmt.decode(_encoding) return fmt_as_str def _get_back_transform(self, position): "Gets the back transformation function for a single value." if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") transform_code = struct.unpack_from( "b", self.shm.buf, self._offset_back_transform_codes + position )[0] transform_function = self._back_transforms_mapping[transform_code] return transform_function def _set_packing_format_and_transform(self, position, fmt_as_str, value): """Sets the packing format and back transformation code for a single value in the list at the specified position.""" if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") struct.pack_into( "8s", self.shm.buf, self._offset_packing_formats + position * 8, fmt_as_str.encode(_encoding) ) transform_code = self._extract_recreation_code(value) struct.pack_into( "b", self.shm.buf, self._offset_back_transform_codes + position, transform_code ) def __getitem__(self, position): position = position if position >= 0 else position + self._list_len try: offset = self._offset_data_start + self._allocated_offsets[position] (v,) = struct.unpack_from( self._get_packing_format(position), self.shm.buf, offset ) except IndexError: raise IndexError("index out of range") back_transform = self._get_back_transform(position) v = back_transform(v) return v def __setitem__(self, position, value): position = position if position >= 0 else position + self._list_len try: item_offset = self._allocated_offsets[position] offset = self._offset_data_start + item_offset current_format = self._get_packing_format(position) except IndexError: raise IndexError("assignment index out of range") if not isinstance(value, (str, bytes)): new_format = self._types_mapping[type(value)] encoded_value = value else: allocated_length = self._allocated_offsets[position + 
1] - item_offset encoded_value = (value.encode(_encoding) if isinstance(value, str) else value) if len(encoded_value) > allocated_length: raise ValueError("bytes/str item exceeds available storage") if current_format[-1] == "s": new_format = current_format else: new_format = self._types_mapping[str] % ( allocated_length, ) self._set_packing_format_and_transform( position, new_format, value ) struct.pack_into(new_format, self.shm.buf, offset, encoded_value) def __reduce__(self): return partial(self.__class__, name=self.shm.name), () def __len__(self): return struct.unpack_from("q", self.shm.buf, 0)[0] def __repr__(self): return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' @property def format(self): "The struct packing format used by all currently stored items." return "".join( self._get_packing_format(i) for i in range(self._list_len) ) @property def _format_size_metainfo(self): "The struct packing format used for the items' storage offsets." return "q" * (self._list_len + 1) @property def _format_packing_metainfo(self): "The struct packing format used for the items' packing formats." return "8s" * self._list_len @property def _format_back_transform_codes(self): "The struct packing format used for the items' back transforms." return "b" * self._list_len @property def _offset_data_start(self): # - 8 bytes for the list length # - (N + 1) * 8 bytes for the element offsets return (self._list_len + 2) * 8 @property def _offset_packing_formats(self): return self._offset_data_start + self._allocated_offsets[-1] @property def _offset_back_transform_codes(self): return self._offset_packing_formats + self._list_len * 8 def count(self, value): "L.count(value) -> integer -- return number of occurrences of value." return sum(value == entry for entry in self) def index(self, value): """L.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present.""" for position, entry in enumerate(self): if value == entry: return position else: raise ValueError(f"{value!r} not in this container") __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/sharedctypes.py000066400000000000000000000142421455552142400260570ustar00rootroot00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import ctypes import weakref from . import heap from . 
import get_context from .context import reduction, assert_spawning _ForkingPickler = reduction.ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=True, ctx=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): ''' Return a synchronization wrapper for a RawArray ''' obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = {name: make_property(name) for name in names} classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, ctx) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length _ForkingPickler.register(type_, reduce_ctype) buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} 
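# Generate the getter/setter pair from the template string below via exec(),
# then cache the resulting property so it is only built once per field name.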
exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/spawn.py000066400000000000000000000226611455552142400245150ustar00rootroot00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import sys import runpy import types from . import get_start_method, set_start_method from . import process from .context import reduction from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
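#
# Illustrative sketch (not part of the original module): how embedding or
# application code might point spawned children at a specific interpreter
# through the public multiprocess API.  The helper name _demo_set_executable
# is hypothetical and is never called by the library.
#
def _demo_set_executable():
    # A minimal sketch, assuming the standard public API of multiprocess.
    import sys
    import multiprocess as mp
    mp.set_executable(sys.executable)    # interpreter used for child processes
    ctx = mp.get_context('spawn')        # force the spawn start method
    p = ctx.Process(target=print, args=('hello from child',))
    p.start()
    p.join()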
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = getattr(sys, 'frozen', False) WINSERVICE = sys.executable and sys.executable.lower().endswith("pythonservice.exe") def set_executable(exe): global _python_exe if exe is None: _python_exe = exe elif sys.platform == 'win32': _python_exe = os.fsdecode(exe) else: _python_exe = os.fsencode(exe) def get_executable(): return _python_exe if WINSERVICE: set_executable(os.path.join(sys.exec_prefix, 'python.exe')) else: set_executable(sys.executable) # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--multiprocessing-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() exe = get_executable() return [exe] + opts + ['-c', prog, '--multiprocessing-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv), "Not forking" if sys.platform == 'win32': import msvcrt import _winapi if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid) else: source_process = None new_handle = reduction.duplicate(pipe_handle, source_process=source_process) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process else: from . import resource_tracker resource_tracker._resource_tracker._fd = tracker_fd fd = pipe_handle parent_sentinel = os.dup(pipe_handle) exitcode = _main(fd, parent_sentinel) sys.exit(exitcode) def _main(fd, parent_sentinel): with os.fdopen(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = reduction.pickle.load(from_parent) prepare(preparation_data) self = reduction.pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap(parent_sentinel) def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable. 
To fix this issue, refer to the "Safe importing of main module" section in https://docs.python.org/3/library/multiprocessing.html ''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path=sys.path.copy() try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] main_mod_name = getattr(main_module.__spec__, "name", None) if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. 
We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/synchronize.py000066400000000000000000000304151455552142400257340ustar00rootroot00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import sys import tempfile try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import time from . import context from . import process from . import util # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
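#
# Illustrative sketch (not part of the original module): how user code might
# exercise the primitives implemented below through the public multiprocess
# API.  The helper name _demo_primitives is hypothetical and is never called.
#
def _demo_primitives():
    # A minimal sketch, assuming the standard public API of multiprocess.
    import multiprocess as mp
    ctx = mp.get_context()
    lock = ctx.Lock()            # non-recursive lock backed by a SemLock
    event = ctx.Event()          # built from a Condition plus a Semaphore
    with lock:                   # Lock supports the context-manager protocol
        event.set()
    assert event.is_set()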
# See issue 3770 try: from _multiprocess import SemLock, sem_unlink except ImportError: try: from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, *, ctx): if ctx is None: ctx = context._default_context.get_context() self._is_fork_ctx = ctx.get_start_method() == 'fork' unlink_now = sys.platform == 'win32' or self._is_fork_ctx for i in range(100): try: sl = self._semlock = _multiprocessing.SemLock( kind, value, maxvalue, self._make_name(), unlink_now) except FileExistsError: pass else: break else: raise FileExistsError('cannot find name for semaphore') util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if self._semlock.name is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .resource_tracker import register register(self._semlock.name, "semaphore") util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .resource_tracker import unregister sem_unlink(name) unregister(name, "semaphore") def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: if self._is_fork_ctx: #XXX: limits pickling? raise RuntimeError('A SemLock created in a fork context is being ' 'shared with a process in a spawn context. This is ' 'not supported. Please use the same context to create ' 'multiprocess objects and Process.') h = sl.handle return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() # Ensure that deserialized SemLock can be serialized again (gh-108520). 
self._is_fork_ctx = False @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % \ (self.__class__.__name__, value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) # # Recursive lock # class RLock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None, *, ctx): self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def 
notify(self, n=1): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire( False), ('notify: Should not have been able to acquire ' + '_wait_semaphore') # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res, ('notify: Bug in sleeping_count.acquire' + '- res should not be False') sleepers = 0 while sleepers < n and self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def notify_all(self): self.notify(n=sys.maxsize) def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result # # Event # class Event(object): def __init__(self, *, ctx): self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False def __repr__(self) -> str: set_status = 'set' if self.is_set() else 'unset' return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>" # # Barrier # class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, *, ctx): import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): self._array[1] = value uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/000077500000000000000000000000001455552142400241465ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/__init__.py000066400000000000000000006324521455552142400262730ustar00rootroot00000000000000# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import textwrap import time import io import itertools import sys import os import gc import errno import functools import signal import array import socket import random import logging import subprocess import struct import operator import pathlib import pickle #XXX: use dill? 
import weakref import warnings import test.support import test.support.script_helper from test import support from test.support import hashlib_helper from test.support import import_helper from test.support import os_helper from test.support import script_helper from test.support import socket_helper from test.support import threading_helper from test.support import warnings_helper # Skip tests if _multiprocessing wasn't built. _multiprocessing = import_helper.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. import_helper.import_module('multiprocess.synchronize') import threading import multiprocess as multiprocessing import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocess import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None if hasattr(support,'HAVE_ASAN_FORK_BUG') and support.HAVE_ASAN_FORK_BUG: # gh-89363: Skip multiprocessing tests if Python is built with ASAN to # work around a libasan race condition: dead lock in pthread_create(). raise unittest.SkipTest("libasan has a pthread_create() dead lock related to thread+fork") # gh-110666: Tolerate a difference of 100 ms when comparing timings # (clock resolution) CLOCK_RES = 0.100 # Don't ignore user's installed packages ENV = dict(__cleanenv = False, __isolated = False) # Timeout to wait until a process completes #XXX: travis-ci TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused threading_helper.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocess import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 # BaseManager.shutdown_timeout SHUTDOWN_TIMEOUT = support.SHORT_TIMEOUT HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocess.connection import wait def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough 
semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." % nsems_min) def only_run_in_spawn_testsuite(reason): """Returns a decorator: raises SkipTest when SM != spawn at test time. This can be useful to save overall Python test suite execution time. "spawn" is the universal mode available on all platforms so this limits the decorated test to only execute within test_multiprocessing_spawn. This would not be necessary if we refactored our test suite to split things into other test files when they are not start method specific to be rerun under all start methods. """ def decorator(test_item): @functools.wraps(test_item) def spawn_check_wrapper(*args, **kwargs): if (start_method := multiprocessing.get_start_method()) != "spawn": raise unittest.SkipTest(f"{start_method=}, not 'spawn'; {reason}") return test_item(*args, **kwargs) return spawn_check_wrapper return decorator class TestInternalDecorators(unittest.TestCase): """Logic within a test suite that could errantly skip tests? Test it!""" @unittest.skipIf(sys.platform == "win32", "test requires that fork exists.") def test_only_run_in_spawn_testsuite(self): if multiprocessing.get_start_method() != "spawn": raise unittest.SkipTest("only run in test_multiprocessing_spawn.") try: @only_run_in_spawn_testsuite("testing this decorator") def return_four_if_spawn(): return 4 except Exception as err: self.fail(f"expected decorated `def` not to raise; caught {err}") orig_start_method = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method("spawn", force=True) self.assertEqual(return_four_if_spawn(), 4) multiprocessing.set_start_method("fork", force=True) with self.assertRaises(unittest.SkipTest) as ctx: return_four_if_spawn() self.assertIn("testing this decorator", str(ctx.exception)) self.assertIn("start_method=", str(ctx.exception)) finally: multiprocessing.set_start_method(orig_start_method, force=True) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.monotonic() try: return self.func(*args, **kwds) finally: self.elapsed = time.monotonic() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. 
def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_set_executable(self): if self.TYPE == 'threads': self.skipTest(f'test not appropriate for {self.TYPE}') paths = [ sys.executable, # str sys.executable.encode(), # bytes pathlib.Path(sys.executable) # os.PathLike ] for path in paths: self.set_executable(path) p = self.Process() p.start() p.join() self.assertEqual(p.exitcode, 0) @support.requires_resource('cpu') def test_args_argument(self): # bpo-45735: Using list or tuple as *args* in constructor could # achieve the same effect. args_cases = (1, "str", [1], (1,)) args_types = (list, tuple) test_cases = itertools.product(args_cases, args_types) for args, args_type in test_cases: with self.subTest(args=args, args_type=args_type): q = self.Queue(1) # pass a tuple or list as args p = self.Process(target=self._test_args, args=args_type((q, args))) p.daemon = True p.start() child_args = q.get() self.assertEqual(child_args, args) p.join() close_queue(q) @classmethod def _test_args(cls, q, arg): q.put(arg) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocess.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def _test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocess.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=support.SHORT_TIMEOUT) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. 
# We use alarm() to interrupt it if it blocks for too long. def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) else: self.assertEqual(exitcode, -signal.SIGTERM) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) @support.requires_resource('walltime') def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() travis = os.environ.get('COVERAGE') #XXX: travis-ci N = (1 if travis else 5) if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: 
p.start() for p in procs: join_process(p) for p in procs: self.assertEqual(p.exitcode, 0) procs = [self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.001) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() gc.collect() # For PyPy or other GCs. self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(os_helper.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) 
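    # Illustrative sketch (not part of the original test suite): the basic
    # Process lifecycle that the surrounding tests exercise in many variants.
    # The name _demo_process_lifecycle is hypothetical and is never invoked.
    @staticmethod
    def _demo_process_lifecycle():
        import multiprocess as mp
        p = mp.Process(target=sum, args=([1, 2, 3],))
        p.start()                    # launch the child process
        p.join()                     # wait for it to finish
        assert p.exitcode == 0       # a clean exit reports code 0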
@classmethod def _sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, encoding="utf-8") as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("__init__.py", err) #self.assertIn("1/0 # MARKER", err) #FIXME @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) 
self.assertEqual(p.exitcode, 1) with open(testfn, encoding="utf-8") as f: content = f.read() self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) cases = [ ((True,), 1), ((False,), 0), ((8,), 8), ((None,), 0), ((), 0), ] for args, expected in cases: with self.subTest(args=args): p = self.Process(target=sys.exit, args=args) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, expected) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) 
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with os_helper.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w', encoding="utf-8") as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with import_helper.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = time.monotonic() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = time.monotonic() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once # failed because the delta was only 135.8 ms. self.assertGreaterEqual(delta, 0.100) close_queue(q) def test_queue_feeder_donot_stop_onexc(self): # bpo-30414: verify feeder handles exceptions correctly if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): def __reduce__(self): raise AttributeError with test.support.captured_stderr(): q = self.Queue() q.put(NotSerializable()) q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) close_queue(q) with test.support.captured_stderr(): # bpo-33078: verify that the queue size is correctly handled # on errors. 
q = self.Queue(maxsize=1) q.put(NotSerializable()) q.put(True) try: self.assertEqual(q.qsize(), 1) except NotImplementedError: # qsize is not available on all platform as it # relies on sem_getvalue pass self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Check that the size of the queue is correct self.assertTrue(q.empty()) close_queue(q) def test_queue_feeder_on_queue_feeder_error(self): # bpo-30006: verify feeder handles exceptions using the # _on_queue_feeder_error hook. if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): """Mock unserializable object""" def __init__(self): self.reduce_was_called = False self.on_queue_feeder_error_was_called = False def __reduce__(self): self.reduce_was_called = True raise AttributeError class SafeQueue(multiprocessing.queues.Queue): """Queue with overloaded _on_queue_feeder_error hook""" @staticmethod def _on_queue_feeder_error(e, obj): if (isinstance(e, AttributeError) and isinstance(obj, NotSerializable)): obj.on_queue_feeder_error_was_called = True not_serializable_obj = NotSerializable() # The captured_stderr reduces the noise in the test report with test.support.captured_stderr(): q = SafeQueue(ctx=multiprocessing.get_context()) q.put(not_serializable_obj) # Verify that q is still functioning correctly q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Assert that the serialization and the hook have been called correctly self.assertTrue(not_serializable_obj.reduce_was_called) self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) def test_closed_queue_put_get_exceptions(self): for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): q.close() with self.assertRaisesRegex(ValueError, 'is closed'): q.put('foo') with self.assertRaisesRegex(ValueError, 'is closed'): q.get() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # 
self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def assertReachesEventually(self, func, value): for i in range(10): try: if func() == value: break except NotImplementedError: break time.sleep(DELTA) time.sleep(DELTA) self.assertReturnsIfImplemented(value, func) def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() 
cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.100 dt = time.monotonic() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = time.monotonic() - dt if not result and (expected - CLOCK_RES) <= dt: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
for i in range(3): time.sleep(0.010) with cond: state.value += 1 cond.notify() join_process(p) self.assertTrue(success.value) @classmethod def _test_wait_result(cls, c, pid): with c: c.notify() time.sleep(1) if pid is not None: os.kill(pid, signal.SIGINT) def test_wait_result(self): if isinstance(self, ProcessesMixin) and sys.platform != 'win32': pid = os.getpid() else: pid = None c = self.Condition() with c: self.assertFalse(c.wait(0)) self.assertFalse(c.wait(0.1)) p = self.Process(target=self._test_wait_result, args=(c, pid)) p.start() self.assertTrue(c.wait(60)) if pid is not None: self.assertRaises(KeyboardInterrupt, c.wait, 60) p.join() class _TestEvent(BaseTestCase): @classmethod def _test_event(cls, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporarily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) p = self.Process(target=self._test_event, args=(event,)) p.daemon = True p.start() self.assertEqual(wait(), True) p.join() def test_repr(self) -> None: event = self.Event() if self.TYPE == 'processes': self.assertRegex(repr(event), r"<Event at .* unset>") event.set() self.assertRegex(repr(event), r"<Event at .* set>") event.clear() self.assertRegex(repr(event), r"<Event at .* unset>") elif self.TYPE == 'manager': self.assertRegex(repr(event), r"<EventProxy object, typeid 'Event' at .*") # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(os_helper.unlink, os_helper.TESTFN) with open(os_helper.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data.
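# A rough sketch of the reduction API exercised here (behavioral summary,
# not vendored code): on POSIX, reduction.send_handle(conn, fd, pid) ships
# the descriptor as SCM_RIGHTS ancillary data over the connection's socket,
# and reduction.recv_handle(conn) returns a new fd on the receiving side,
# roughly:
#
#     reduction.send_handle(conn, fd, destination_pid)
#     fd = reduction.recv_handle(conn)
#
# _send_data_without_fd() above writes a plain byte with no ancillary data,
# so the recv_handle() call below is expected to fail with RuntimeError.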
if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. 
conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @hashlib_helper.requires_hashdigest('md5') class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=support.LONG_TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((socket_helper.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = 
lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). 
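# Background for this check (behavioral summary of multiprocess.heap, not
# vendored code): BufferWrapper blocks are returned to the heap by a
# Finalize callback, so a collection that fires while the heap lock is
# already held (for example, in the middle of malloc() on the same thread)
# used to deadlock.  Heap.free() only try-acquires the lock and parks the
# block in _pending_free_blocks when the lock is busy; the loop below
# hammers that path with cyclically referenced wrappers.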
# Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") @hashlib_helper.requires_hashdigest('md5') class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def _new_shm_name(self, prefix): # Add a PID to the name of a POSIX shared memory object to allow # running multiprocessing tests (test_multiprocessing_fork, # test_multiprocessing_spawn, etc) in parallel. return prefix + str(os.getpid()) def test_shared_memory_basics(self): name_tsmb = self._new_shm_name('test01_tsmb') sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, name_tsmb) self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Verify __repr__ self.assertIn(sms.name, str(sms)) self.assertIn(str(sms.size), str(sms)) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory(name_tsmb) self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. 
same_sms.close() # Creating Shared Memory Segment with -ve size with self.assertRaises(ValueError): shared_memory.SharedMemory(create=True, size=-2) # Attaching Shared Memory Segment without a name with self.assertRaises(ValueError): shared_memory.SharedMemory(create=False) # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. name_dblunlink = self._new_shm_name('test01_dblunlink') sms_uno = shared_memory.SharedMemory( name_dblunlink, create=True, size=5000 ) with self.assertRaises(FileNotFoundError): try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory(name_dblunlink) sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( name_tsmb, create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. 
sms.close() @unittest.skipIf(True, "fails with dill >= 0.3.5") def test_shared_memory_recreate(self): # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) def test_invalid_shared_memory_cration(self): # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_pickle_unpickle(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) sms.buf[0:6] = b'pickle' # Test pickling pickled_sms = pickle.dumps(sms, protocol=proto) # Test unpickling sms2 = pickle.loads(pickled_sms) self.assertIsInstance(sms2, shared_memory.SharedMemory) self.assertEqual(sms.name, sms2.name) self.assertEqual(bytes(sms.buf[0:6]), b'pickle') self.assertEqual(bytes(sms2.buf[0:6]), b'pickle') # Test that unpickled version is still the same SharedMemory sms.buf[0:6] = b'newval' self.assertEqual(bytes(sms.buf[0:6]), b'newval') self.assertEqual(bytes(sms2.buf[0:6]), b'newval') sms2.buf[0:6] = b'oldval' self.assertEqual(bytes(sms.buf[0:6]), b'oldval') self.assertEqual(bytes(sms2.buf[0:6]), b'oldval') def test_shared_memory_pickle_unpickle_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) sms.buf[0:6] = b'pickle' pickled_sms = pickle.dumps(sms, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sms.close() sms.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(pickled_sms) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocessing tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. 
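# Behavioral note (not vendored code): pickling a SharedMemory object
# effectively captures just the segment's name, and unpickling re-attaches
# to the existing block by that name, so the parent and the child below end
# up viewing the same buffer rather than a copy.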
p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' #XXX: ensure correct resource_tracker rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. 
absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify __repr__ self.assertIn(sl.shm.name, str(sl)) self.assertIn(str(list(sl)), str(sl)) # Index Out of Range (get) with self.assertRaises(IndexError): sl[7] # Index Out of Range (set) with self.assertRaises(IndexError): sl[7] = 2 # Assign value without format change (str -> str) current_format = sl._get_packing_format(0) sl[0] = 'howdy' self.assertEqual(current_format, sl._get_packing_format(0)) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. name_duplicate = self._new_shm_name('test03_duplicate') sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual(name_duplicate, sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. 
empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl, protocol=proto) deserialized_sl = pickle.loads(serialized_sl) self.assertIsInstance( deserialized_sl, shared_memory.ShareableList) self.assertEqual(deserialized_sl[-1], 9) self.assertIsNot(sl, deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") sl[3] = "newvalue" self.assertEqual(deserialized_sl[3], "newvalue") larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto) self.assertEqual(len(serialized_sl), len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_ShareableList_pickling_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) serialized_sl = pickle.dumps(sl, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sl.shm.close() sl.shm.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(serialized_sl) def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. p.terminate() p.wait() err_msg = ("A SharedMemory segment was leaked after " "a process was abruptly terminated") if hasattr(support, 'sleeping_retry'): # if >= 3.11.7 for _ in support.sleeping_retry(support.LONG_TIMEOUT, err_msg): try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: deadline = time.monotonic() + support.LONG_TIMEOUT t = 0.1 while time.monotonic() < deadline: time.sleep(t) t = min(t*2, 5) try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: raise AssertionError(err_msg) if os.name == 'posix': # Without this line it was raising warnings like: # UserWarning: resource_tracker: # There appear to be 1 leaked shared_memory # objects to clean up at shutdown # See: https://bugs.python.org/issue45209 resource_tracker.unregister(f"/{name}", "shared_memory") # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # Test to verify that `Finalize` works. 
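# A rough usage sketch of the util.Finalize API exercised below (the names
# here are illustrative, not part of the test):
#
#     obj = SomeResource()
#     util.Finalize(obj, cleanup, args=('payload',), exitpriority=10)
#
# The callback runs when `obj` is garbage collected, or from
# util._exit_function() at shutdown for finalizers registered with an
# exitpriority; higher priorities run first, and finalizers created without
# an exitpriority are skipped at exit.  That ordering is what the
# 'd10'/'d03'/'d02'/'d01' expectations in test_finalize() assert.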
# class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): gc.collect() # For PyPy or other GCs. self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a gc.collect() # For PyPy or other GCs. b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called gc.collect() # For PyPy or other GCs. c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) @support.requires_resource('cpu') def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with threading_helper.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' 
+ m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): pass finally: # Hack private attribute _handle to avoid printing an error # in conn.__del__ conn._handle = None 
self.assertRaises((ValueError, OSError), multiprocessing.connection.Connection, -1) @hashlib_helper.requires_hashdigest('md5') class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection.CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') # # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 @hashlib_helper.requires_hashdigest('md5') class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() self.mgr.join() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() m.join() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocessing.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random() * 0.100) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random() * 0.100) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.create_server((socket_helper.HOST, 0)) addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) @support.requires_resource('walltime') def test_wait_timeout(self): from multiprocess.connection import wait timeout = 5.0 # seconds a, b = multiprocessing.Pipe() start = time.monotonic() res = wait([a, b], timeout) delta = time.monotonic() - start self.assertEqual(res, []) self.assertGreater(delta, timeout - CLOCK_RES) b.send(None) res = wait([a, b], 20) self.assertEqual(res, [a]) @classmethod def signal_and_sleep(cls, sem, period): 
sem.release() time.sleep(period) @support.requires_resource('walltime') def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocessing.Semaphore(0) a, b = multiprocessing.Pipe() p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) self.assertTrue(sem.acquire(timeout=20)) start = time.monotonic() res = wait([a, p.sentinel, b], expected + 20) delta = time.monotonic() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocessing.Pipe() t = time.monotonic() res = wait([a], timeout=-1) t = time.monotonic() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls): import json r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json # start child process using unusual flags prog = ('from multiprocess.tests import TestFlags; ' + 'TestFlags.run_in_child()') data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() name = 
os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. @classmethod def child(cls, n, conn): if n > 1: p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() join_process(p) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocessing.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocessing.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() join_process(p) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocessing.Pipe() fd = self.get_high_socket_fd() try: p = multiprocessing.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() join_process(p) finally: self.close(fd) writer.close() reader.close() if multiprocessing.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) 
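# At this point the child is most likely blocked in send_bytes() on a full
# pipe; the SIGUSR1 just delivered interrupts that system call, and the
# connection code is expected to retry internally rather than surface
# EINTR.  Draining the full CONN_MAX_SIZE payload below is what shows the
# write completed despite the interruption (issue #17097).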
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, ctx.get_start_method()) def test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocessing.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def test_context_check_module_types(self): try: ctx = multiprocessing.get_context('forkserver') except ValueError: raise unittest.SkipTest('forkserver should be available') with self.assertRaisesRegex(TypeError, 'module_names must be a list of strings'): ctx.set_forkserver_preload([1, 2, 3]) def test_set_get(self): multiprocessing.set_forkserver_preload(PRELOAD) count = 0 old_method = multiprocessing.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocessing.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocessing.get_start_method(), method) ctx = multiprocessing.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocessing) count += 1 finally: multiprocessing.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocessing.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['spawn', 'fork'] or methods == ['fork', 'spawn', 'forkserver'] or methods == ['spawn', 'fork', 'forkserver']) def test_preload_resources(self): if multiprocessing.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "Only Spawn on windows so no risk of mixing") @only_run_in_spawn_testsuite("avoids redundant testing.") def test_mixed_startmethod(self): # Fork-based locks cannot be used with spawned process for process_method 
in ["spawn", "forkserver"]: queue = multiprocessing.get_context("fork").Queue() process_ctx = multiprocessing.get_context(process_method) p = process_ctx.Process(target=close_queue, args=(queue,)) err_msg = "A SemLock created in a fork" with self.assertRaisesRegex(RuntimeError, err_msg): p.start() # non-fork-based locks can be used with all other start methods for queue_method in ["spawn", "forkserver"]: for process_method in multiprocessing.get_all_start_methods(): queue = multiprocessing.get_context(queue_method).Queue() process_ctx = multiprocessing.get_context(process_method) p = process_ctx.Process(target=close_queue, args=(queue,)) p.start() p.join() @classmethod def _put_one_in_queue(cls, queue): queue.put(1) @classmethod def _put_two_and_nest_once(cls, queue): queue.put(2) process = multiprocessing.Process(target=cls._put_one_in_queue, args=(queue,)) process.start() process.join() def test_nested_startmethod(self): # gh-108520: Regression test to ensure that child process can send its # arguments to another process queue = multiprocessing.Queue() process = multiprocessing.Process(target=self._put_two_and_nest_once, args=(queue,)) process.start() process.join() results = [] while not queue.empty(): results.append(queue.get()) # gh-109706: queue.put(1) can write into the queue before queue.put(2), # there is no synchronization in the test. self.assertSetEqual(set(results), set([2, 1])) @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestResourceTracker(unittest.TestCase): def _test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os, tempfile import multiprocess as mp from multiprocess import resource_tracker from multiprocess.shared_memory import SharedMemory mp.set_start_method("spawn") rand = tempfile._RandomNameSequence() def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() err_msg = (f"A {rtype} resource was leaked after a process was " f"abruptly terminated") if hasattr(support, 'sleeping_retry'): # if >= 3.11.7 for _ in support.sleeping_retry(support.SHORT_TIMEOUT, err_msg): try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, # but OSX seems to give EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break else: deadline = time.monotonic() + support.LONG_TIMEOUT while time.monotonic() < deadline: time.sleep(.5) try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, # but OSX seems to give EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) 
break else: raise AssertionError(err_msg) err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) def check_resource_tracker_death(self, signum, should_die): # bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) support.wait_process(pid, exitcode=-signal.SIGKILL) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. 
reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) def test_too_long_name_resource(self): # gh-96819: Resource names that will make the length of a write to a pipe # greater than PIPE_BUF are not allowed rtype = "shared_memory" too_long_name_resource = "a" * (512 - len(rtype)) with self.assertRaises(ValueError): resource_tracker.register(too_long_name_resource, rtype) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() def test_close(self): queue = multiprocessing.SimpleQueue() queue.close() # closing a queue twice should not fail queue.close() # Test specific to CPython since it tests private attributes @test.support.cpython_only def test_closed(self): queue = multiprocessing.SimpleQueue() queue.close() self.assertTrue(queue._reader.closed) self.assertTrue(queue._writer.closed) class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: During pool creation, if we can't create a process, # don't leak already created ones. will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) @hashlib_helper.requires_hashdigest('md5') class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. 
def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert o[1] == 6 """ manager_class = multiprocessing.managers.SyncManager def setUp(self): self.manager = self.manager_class() self.manager.start() self.proc = None def tearDown(self): if self.proc is not None and self.proc.is_alive(): self.proc.terminate() self.proc.join() self.manager.shutdown() self.manager = None self.proc = None @classmethod def setUpClass(cls): support.reap_children() tearDownClass = setUpClass def wait_proc_exit(self): # Only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395). join_process(self.proc) start_time = time.monotonic() if hasattr(support, 'sleeping_retry'): # if >= 3.11.7 for _ in support.sleeping_retry(5.0, error=False): if len(multiprocessing.active_children()) <= 1: break else: dt = time.monotonic() - start_time support.environment_altered = True support.print_warning(f"multiprocessing.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt:.1f} seconds") else: t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = time.monotonic() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocessing.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) self.run_worker(self._test_pool, o) @classmethod def _test_queue(cls, obj): assert obj.qsize() == 2 assert obj.full() assert not obj.empty() assert obj.get() == 5 assert not obj.empty() assert obj.get() == 6 assert obj.empty() def test_queue(self, qname="Queue"): o = getattr(self.manager, qname)(2) o.put(5) o.put(6) self.run_worker(self._test_queue, o) assert o.empty() assert not o.full() def test_joinable_queue(self): self.test_queue("JoinableQueue") @classmethod def 
_test_list(cls, obj): assert obj[0] == 5 assert obj.count(5) == 1 assert obj.index(5) == 0 obj.sort() obj.reverse() for x in obj: pass assert len(obj) == 1 assert obj.pop(0) == 5 def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_dict(cls, obj): assert len(obj) == 1 assert obj['foo'] == 5 assert obj.get('foo') == 5 assert list(obj.items()) == [('foo', 5)] assert list(obj.keys()) == ['foo'] assert list(obj.values()) == [5] assert obj.copy() == {'foo': 5} assert obj.popitem() == ('foo', 5) def test_dict(self): o = self.manager.dict() o['foo'] = 5 self.run_worker(self._test_dict, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_value(cls, obj): assert obj.value == 1 assert obj.get() == 1 obj.set(2) def test_value(self): o = self.manager.Value('i', 1) self.run_worker(self._test_value, o) self.assertEqual(o.value, 2) self.assertEqual(o.get(), 2) @classmethod def _test_array(cls, obj): assert obj[0] == 0 assert obj[1] == 1 assert len(obj) == 2 assert list(obj) == [0, 1] def test_array(self): o = self.manager.Array('i', [0, 1]) self.run_worker(self._test_array, o) @classmethod def _test_namespace(cls, obj): assert obj.x == 0 assert obj.y == 1 def test_namespace(self): o = self.manager.Namespace() o.x = 0 o.y = 1 self.run_worker(self._test_namespace, o) class TestNamedResource(unittest.TestCase): @unittest.skipIf(True, "ModuleNotFoundError") #XXX: since only_run_in_spawn @only_run_in_spawn_testsuite("spawn specific test.") def test_global_named_resource_spawn(self): # # gh-90549: Check that global named resources in main module # will not leak by a subprocess, in spawn context. # testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) with open(testfn, 'w', encoding='utf-8') as f: f.write(textwrap.dedent('''\ import multiprocess as mp ctx = mp.get_context('spawn') global_resource = ctx.Semaphore() def submain(): pass if __name__ == '__main__': p = ctx.Process(target=submain) p.start() p.join() ''')) rc, out, err = script_helper.assert_python_ok(testfn) # on error, err = 'UserWarning: resource_tracker: There appear to # be 1 leaked semaphore objects to clean up at shutdown' self.assertFalse(err, msg=err.decode('utf-8')) class MiscTestCase(unittest.TestCase): def test__all__(self): # Just make sure names in not_exported are excluded support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, not_exported=['SUBDEBUG', 'SUBWARNING', 'license', 'citation']) @unittest.skipIf(True, "ModuleNotFoundError") #XXX: since only_run_in_spawn @only_run_in_spawn_testsuite("avoids redundant testing.") def test_spawn_sys_executable_none_allows_import(self): # Regression test for a bug introduced in # https://github.com/python/cpython/issues/90876 that caused an # ImportError in multiprocessing when sys.executable was None. # This can be true in embedded environments. rc, out, err = script_helper.assert_python_ok( "-c", """if 1: import sys sys.executable = None assert "multiprocess" not in sys.modules, "already imported!" import multiprocess as multiprocessing import multiprocess.spawn # This should not fail\n""", ) self.assertEqual(rc, 0) self.assertFalse(err, msg=err.decode('utf-8')) # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocessing.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. 
Trigger a garbage collection to break these cycles. test.support.gc_collect() processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) if processes: test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocessing.Process connection = multiprocessing.connection current_process = staticmethod(multiprocessing.current_process) parent_process = staticmethod(multiprocessing.parent_process) active_children = staticmethod(multiprocessing.active_children) set_executable = staticmethod(multiprocessing.set_executable) Pool = staticmethod(multiprocessing.Pool) Pipe = staticmethod(multiprocessing.Pipe) Queue = staticmethod(multiprocessing.Queue) JoinableQueue = staticmethod(multiprocessing.JoinableQueue) Lock = staticmethod(multiprocessing.Lock) RLock = staticmethod(multiprocessing.RLock) Semaphore = staticmethod(multiprocessing.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) Condition = staticmethod(multiprocessing.Condition) Event = staticmethod(multiprocessing.Event) Barrier = staticmethod(multiprocessing.Barrier) Value = staticmethod(multiprocessing.Value) Array = staticmethod(multiprocessing.Array) RawValue = staticmethod(multiprocessing.RawValue) RawArray = staticmethod(multiprocessing.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocessing.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocessing.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) start_time = time.monotonic() if hasattr(support, 'sleeping_retry'): # if >= 3.11.7 for _ in support.sleeping_retry(5.0, error=False): if len(multiprocessing.active_children()) <= 1: break else: dt = time.monotonic() - start_time support.environment_altered = True support.print_warning(f"multiprocessing.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt:.1f} seconds") else: t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = time.monotonic() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocessing.Manager still has " f"{multiprocessing.active_children()} " 
f"active children after {dt} seconds") break gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. test.support.environment_altered = True support.print_warning('Shared objects which still exist ' 'at manager shutdown:') support.print_warning(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocessing.dummy.Process connection = multiprocessing.dummy.connection current_process = staticmethod(multiprocessing.dummy.current_process) active_children = staticmethod(multiprocessing.dummy.active_children) Pool = staticmethod(multiprocessing.dummy.Pool) Pipe = staticmethod(multiprocessing.dummy.Pipe) Queue = staticmethod(multiprocessing.dummy.Queue) JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) Lock = staticmethod(multiprocessing.dummy.Lock) RLock = staticmethod(multiprocessing.dummy.RLock) Semaphore = staticmethod(multiprocessing.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) Condition = staticmethod(multiprocessing.dummy.Condition) Event = staticmethod(multiprocessing.dummy.Event) Barrier = staticmethod(multiprocessing.dummy.Barrier) Value = staticmethod(multiprocessing.dummy.Value) Array = staticmethod(multiprocessing.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method, only_type=None, exclude_types=False): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: if only_type and type_ != only_type: continue if exclude_types: continue newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass if type_ == 'manager': Temp = hashlib_helper.requires_hashdigest('md5')(Temp) Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): if only_type: continue class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = __module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocessing.set_forkserver_preload(PRELOAD) multiprocessing.process._cleanup() dangling[0] = multiprocessing.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " "see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates temp directory multiprocessing.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = False # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. 
Trigger a garbage collection to break these cycles. test.support.gc_collect() multiprocessing.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocessing.process._dangling) - set(dangling[0]) if processes: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocessing.util._cleanup_tests() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule @unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available') @unittest.skipIf(sys.platform != "linux", "Linux only") class SemLockTests(unittest.TestCase): def test_semlock_subclass(self): class SemLock(_multiprocessing.SemLock): pass name = f'test_semlock_subclass-{os.getpid()}' s = SemLock(1, 0, 10, name, False) _multiprocessing.sem_unlink(name) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/__main__.py000066400000000000000000000015701455552142400262430ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + '__init__.py') + \ glob.glob(suite + os.path.sep + '*' + os.path.sep + '__init__.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/mp_fork_bomb.py000066400000000000000000000007001455552142400271510ustar00rootroot00000000000000import multiprocessing, sys def foo(): print("123") # Because "if __name__ == '__main__'" is missing this will not work # correctly on Windows. However, we should get a RuntimeError rather # than the Windows equivalent of a fork bomb. 
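# For comparison, a correctly guarded launch (a minimal sketch, not used by
# this fixture) would wrap the process creation below as:
#
#     if __name__ == '__main__':
#         multiprocessing.set_start_method('spawn')
#         p = multiprocessing.Process(target=foo)
#         p.start()
#         p.join()
#
# The guard matters because the spawn and forkserver start methods re-import
# the main module in each child; without it every child would immediately
# start another child.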
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) else: multiprocessing.set_start_method('spawn') p = multiprocessing.Process(target=foo) p.start() p.join() sys.exit(p.exitcode) uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/mp_preload.py000066400000000000000000000005551455552142400266470ustar00rootroot00000000000000import multiprocessing multiprocessing.Lock() def f(): print("ok") if __name__ == "__main__": ctx = multiprocessing.get_context("forkserver") modname = "multiprocess.tests.mp_preload" # Make sure it's importable __import__(modname) ctx.set_forkserver_preload([modname]) proc = ctx.Process(target=f) proc.start() proc.join() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_fork/000077500000000000000000000000001455552142400314555ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_fork/__init__.py000066400000000000000000000014751455552142400335750ustar00rootroot00000000000000import os.path import sys import unittest from test import support import glob import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("fork is not available on Windows") if sys.platform == 'darwin': raise unittest.SkipTest("test may crash on macOS (bpo-33725)") suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) test_manager.py000066400000000000000000000003021455552142400344140ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_forkimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', only_type="manager") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_fork/test_misc.py000066400000000000000000000003011455552142400340130ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', exclude_types=True) if __name__ == '__main__': unittest.main() test_processes.py000066400000000000000000000003041455552142400350120ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_forkimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', only_type="processes") if __name__ == '__main__': unittest.main() test_threads.py000066400000000000000000000003021455552142400344340ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_forkimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', only_type="threads") if __name__ == '__main__': unittest.main() 
uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_forkserver/000077500000000000000000000000001455552142400327045ustar00rootroot00000000000000__init__.py000066400000000000000000000013421455552142400347360ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_forkserverimport os.path import sys import unittest from test import support import glob import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("forkserver is not available on Windows") suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) test_manager.py000066400000000000000000000003101455552142400356420ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', only_type="manager") if __name__ == '__main__': unittest.main() test_misc.py000066400000000000000000000003071455552142400351710ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', exclude_types=True) if __name__ == '__main__': unittest.main() test_processes.py000066400000000000000000000003121455552142400362400ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', only_type="processes") if __name__ == '__main__': unittest.main() test_threads.py000066400000000000000000000003101455552142400356620ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', only_type="threads") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_main_handling.py000066400000000000000000000273621455552142400336700ustar00rootroot00000000000000# tests __main__ module handling in multiprocessing from test import support from test.support import import_helper # Skip tests if _multiprocessing wasn't built. import_helper.import_module('_multiprocessing') import importlib import importlib.machinery import unittest import sys import os import os.path import py_compile from test.support import os_helper from test.support.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, assert_python_ok) if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") # Look up which start methods are available to test import multiprocess as multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) # Issue #22332: Skip tests if sem_open implementation is broken. 
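# (importing multiprocess.synchronize raises ImportError on such platforms,
# so import_helper.import_module() skips this whole test module)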
import_helper.import_module('multiprocess.synchronize') verbose = support.verbose test_source = """\ # multiprocessing includes all sorts of shenanigans to make __main__ # attributes accessible in the subprocess in a pickle compatible way. # We run the "doesn't work in the interactive interpreter" example from # the docs to make sure it *does* work from an executed __main__, # regardless of the invocation mechanism import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method # We use this __main__ defined function in the map call below in order to # check that multiprocessing in correctly running the unguarded # code in child processes and then making it available as __main__ def f(x): return x*x # Check explicit relative imports if "check_sibling" in __file__: # We're inside a package and not in a __main__.py file # so make sure explicit relative imports work correctly from . import sibling if __name__ == '__main__': start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(f, [1, 2, 3], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) test_source_main_skipped_in_children = """\ # __main__.py files have an implied "if __name__ == '__main__'" so # multiprocessing should always skip running them in child processes # This means we can't use __main__ defined functions in child processes, # so we just use "int" as a passthrough operation below if __name__ != "__main__": raise RuntimeError("Should only be called as __main__!") import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(int, [1, 4, 9], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) # These helpers were copied from test_cmd_line_script & tweaked a bit... def _make_test_script(script_dir, script_basename, source=test_source, omit_suffix=False): to_return = make_script(script_dir, script_basename, source, omit_suffix) # Hack to check explicit relative imports if script_basename == "check_sibling": make_script(script_dir, "sibling", "") importlib.invalidate_caches() return to_return def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source=test_source, depth=1): to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth) importlib.invalidate_caches() return to_return # There's no easy way to pass the script directory in to get # -m to work (avoiding that is the whole point of making # directories and zipfiles executable!) 
# So we fake it for testing purposes with a custom launch script launch_source = """\ import sys, os.path, runpy sys.path.insert(0, %s) runpy._run_module_as_main(%r) """ def _make_launch_script(script_dir, script_basename, module_name, path=None): if path is None: path = "os.path.dirname(__file__)" else: path = repr(path) source = launch_source % (path, module_name) to_return = make_script(script_dir, script_basename, source) importlib.invalidate_caches() return to_return class MultiProcessingCmdLineMixin(): maxDiff = None # Show full tracebacks on subprocess failure def setUp(self): if self.start_method not in AVAILABLE_START_METHODS: self.skipTest("%r start method not available" % self.start_method) def _check_output(self, script_name, exit_code, out, err): if verbose > 1: print("Output from test script %r:" % script_name) print(repr(out)) self.assertEqual(exit_code, 0) self.assertEqual(err.decode('utf-8'), '') expected_results = "%s -> [1, 4, 9]" % self.start_method self.assertEqual(out.decode('utf-8').strip(), expected_results) def _check_script(self, script_name, *cmd_line_switches): if not __debug__: cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) run_args = cmd_line_switches + (script_name, self.start_method) rc, out, err = assert_python_ok(*run_args, __isolated=False) self._check_output(script_name, rc, out, err) def test_basic_script(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') self._check_script(script_name) def test_basic_script_no_suffix(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script', omit_suffix=True) self._check_script(script_name) def test_ipython_workaround(self): # Some versions of the IPython launch script are missing the # __name__ = "__main__" guard, and multiprocessing has long had # a workaround for that case # See https://github.com/ipython/ipython/issues/4698 source = test_source_main_skipped_in_children with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'ipython', source=source) self._check_script(script_name) script_no_suffix = _make_test_script(script_dir, 'ipython', source=source, omit_suffix=True) self._check_script(script_no_suffix) def test_script_compiled(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(pyc_file) def test_directory(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) self._check_script(script_dir) def test_directory_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(script_dir) def test_zipfile(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) self._check_script(zip_name) def test_zipfile_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) compiled_name = 
py_compile.compile(script_name, doraise=True) zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) self._check_script(zip_name) def test_module_in_package(self): with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, 'check_sibling') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.check_sibling') self._check_script(launch_name) def test_module_in_package_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) self._check_script(launch_name) def test_module_in_subpackage_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) self._check_script(launch_name) def test_package(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) def test_package_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) compiled_name = py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) # Test all supported start methods (setupClass skips as appropriate) class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'spawn' main_in_children_source = test_source_main_skipped_in_children class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'fork' main_in_children_source = test_source class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'forkserver' main_in_children_source = test_source_main_skipped_in_children def tearDownModule(): support.reap_children() if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_spawn/000077500000000000000000000000001455552142400316445ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_spawn/__init__.py000066400000000000000000000011771455552142400337630ustar00rootroot00000000000000import os.path import sys import unittest from test import support import glob import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) 
test_manager.py000066400000000000000000000003031455552142400346040ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_spawnimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', only_type="manager") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_spawn/test_misc.py000066400000000000000000000003021455552142400342030ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', exclude_types=True) if __name__ == '__main__': unittest.main() test_processes.py000066400000000000000000000003051455552142400352020ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_spawnimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', only_type="processes") if __name__ == '__main__': unittest.main() test_threads.py000066400000000000000000000003031455552142400346240ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/tests/test_multiprocessing_spawnimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', only_type="threads") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.11/multiprocess/util.py000066400000000000000000000333541455552142400243430ustar00rootroot00000000000000# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import itertools import sys import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocess' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocess ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # Abstract socket support def _platform_supports_abstract_sockets(): if sys.platform == "linux": return True if hasattr(sys, 'getandroidapilevel'): return True return False def is_abstract_socket_namespace(address): if not address: return False if isinstance(address, bytes): return address[0] == 0 elif isinstance(address, str): return address[0] == "\0" raise TypeError(f'address type of {address!r} unrecognized') abstract_sockets_supported = _platform_supports_abstract_sockets() # # Function returning a temp directory which will be removed on exit # def _remove_temp_dir(rmtree, tempdir): rmtree(tempdir) current_process = process.current_process() # current_process() can be None if the finalizer is called # late during Python finalization if current_process is not None: current_process._config['tempdir'] = None def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up tempdir = process.current_process()._config.get('tempdir') if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) # keep a strong reference to shutil.rmtree(), since the finalizer # can be called late during Python shutdown Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), exitpriority=-100) process.current_process()._config['tempdir'] = tempdir return tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} 
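# A typical (illustrative) use of Finalize, mirroring get_temp_dir() above:
#
#     Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
#              exitpriority=-100)
#
# A Finalize tied to a live object fires when that object is garbage
# collected; one registered with obj=None (as here) fires at interpreter
# exit via _run_finalizers(), with higher exitpriority values running first.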
_finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): if (exitpriority is not None) and not isinstance(exitpriority,int): raise TypeError( "Exitpriority ({0!r}) must be None or int, not {1!s}".format( exitpriority, type(exitpriority))) if obj is not None: self._weakref = weakref.ref(obj, self) elif exitpriority is None: raise ValueError("Without object, exitpriority cannot be None") self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<%s object, dead>' % self.__class__.__name__ x = '<%s object, callback=%s' % ( self.__class__.__name__, getattr(self._callback, '__name__', self._callback)) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitpriority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0] is not None else: f = lambda p : p[0] is not None and p[0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). # list(_finalizer_registry) should be atomic, while # list(_finalizer_registry.items()) is not. 
keys = [key for key in list(_finalizer_registry) if f(key)] keys.sort(reverse=True) for key in keys: finalizer = _finalizer_registry.get(key) # key may have been removed from the registry if finalizer is not None: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=process.active_children, current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) def _at_fork_reinit(self): self._lock._at_fork_reinit() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () # # Close fds except those specified # try: MAXFD = os.sysconf("SC_OPEN_MAX") except Exception: MAXFD = 256 def close_all_fds_except(fds): fds = list(fds) + [-1, MAXFD] fds.sort() assert fds[-1] == MAXFD, 'fd too large' for i in range(len(fds) - 1): os.closerange(fds[i]+1, fds[i+1]) # # Close sys.stdin and replace stdin with os.devnull # def _close_stdin(): if sys.stdin is None: return try: sys.stdin.close() except (OSError, ValueError): pass try: fd = os.open(os.devnull, os.O_RDONLY) try: sys.stdin = open(fd, encoding="utf-8", closefd=False) except: os.close(fd) raise except (OSError, ValueError): pass # # Flush standard streams, if any # def _flush_std_streams(): try: sys.stdout.flush() except (AttributeError, ValueError): pass try: sys.stderr.flush() except (AttributeError, ValueError): pass # # Start a program with only specified fds kept open # def spawnv_passfds(path, args, passfds): import _posixsubprocess import subprocess passfds = tuple(sorted(map(int, passfds))) errpipe_read, errpipe_write = os.pipe() 
try: return _posixsubprocess.fork_exec( args, [path], True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, -1, None, None, None, -1, None, subprocess._USE_VFORK) finally: os.close(errpipe_read) os.close(errpipe_write) def close_fds(*fds): """Close each file descriptor given as an argument""" for fd in fds: os.close(fd) def _cleanup_tests(): """Cleanup multiprocessing resources when multiprocessing tests completed.""" from test import support # cleanup multiprocessing process._cleanup() # Stop the ForkServer process if it's running from multiprocess import forkserver forkserver._forkserver._stop() # Stop the ResourceTracker process if it's running from multiprocess import resource_tracker resource_tracker._resource_tracker._stop() # bpo-37421: Explicitly call _run_finalizers() to remove immediately # temporary directories created by multiprocessing.util.get_temp_dir(). _run_finalizers() support.gc_collect() support.reap_children() uqfoundation-multiprocess-b3457a5/py3.12/000077500000000000000000000000001455552142400202545ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/Modules/000077500000000000000000000000001455552142400216645ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/Modules/_multiprocess/000077500000000000000000000000001455552142400245545ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/Modules/_multiprocess/clinic/000077500000000000000000000000001455552142400260155ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/Modules/_multiprocess/clinic/multiprocessing.c.h000066400000000000000000000110211455552142400316310ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) # include "pycore_gc.h" // PyGC_Head # include "pycore_runtime.h" // _Py_ID() #endif #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_closesocket__doc__, "closesocket($module, handle, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_CLOSESOCKET_METHODDEF \ {"closesocket", (PyCFunction)_multiprocessing_closesocket, METH_O, _multiprocessing_closesocket__doc__}, static PyObject * _multiprocessing_closesocket_impl(PyObject *module, HANDLE handle); static PyObject * _multiprocessing_closesocket(PyObject *module, PyObject *arg) { PyObject *return_value = NULL; HANDLE handle; handle = PyLong_AsVoidPtr(arg); if (!handle && PyErr_Occurred()) { goto exit; } return_value = _multiprocessing_closesocket_impl(module, handle); exit: return return_value; } #endif /* defined(MS_WINDOWS) */ #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_recv__doc__, "recv($module, handle, size, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_RECV_METHODDEF \ {"recv", _PyCFunction_CAST(_multiprocessing_recv), METH_FASTCALL, _multiprocessing_recv__doc__}, static PyObject * _multiprocessing_recv_impl(PyObject *module, HANDLE handle, int size); static PyObject * _multiprocessing_recv(PyObject *module, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; HANDLE handle; int size; if (!_PyArg_CheckPositional("recv", nargs, 2, 2)) { goto exit; } handle = PyLong_AsVoidPtr(args[0]); if (!handle && PyErr_Occurred()) { goto exit; } size = _PyLong_AsInt(args[1]); if (size == -1 && PyErr_Occurred()) { goto exit; } return_value = _multiprocessing_recv_impl(module, handle, size); exit: return return_value; } #endif /* defined(MS_WINDOWS) */ #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_send__doc__, "send($module, handle, buf, 
/)\n" "--\n" "\n"); #define _MULTIPROCESSING_SEND_METHODDEF \ {"send", _PyCFunction_CAST(_multiprocessing_send), METH_FASTCALL, _multiprocessing_send__doc__}, static PyObject * _multiprocessing_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf); static PyObject * _multiprocessing_send(PyObject *module, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; HANDLE handle; Py_buffer buf = {NULL, NULL}; if (!_PyArg_CheckPositional("send", nargs, 2, 2)) { goto exit; } handle = PyLong_AsVoidPtr(args[0]); if (!handle && PyErr_Occurred()) { goto exit; } if (PyObject_GetBuffer(args[1], &buf, PyBUF_SIMPLE) != 0) { goto exit; } if (!PyBuffer_IsContiguous(&buf, 'C')) { _PyArg_BadArgument("send", "argument 2", "contiguous buffer", args[1]); goto exit; } return_value = _multiprocessing_send_impl(module, handle, &buf); exit: /* Cleanup for buf */ if (buf.obj) { PyBuffer_Release(&buf); } return return_value; } #endif /* defined(MS_WINDOWS) */ PyDoc_STRVAR(_multiprocessing_sem_unlink__doc__, "sem_unlink($module, name, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_SEM_UNLINK_METHODDEF \ {"sem_unlink", (PyCFunction)_multiprocessing_sem_unlink, METH_O, _multiprocessing_sem_unlink__doc__}, static PyObject * _multiprocessing_sem_unlink_impl(PyObject *module, const char *name); static PyObject * _multiprocessing_sem_unlink(PyObject *module, PyObject *arg) { PyObject *return_value = NULL; const char *name; if (!PyUnicode_Check(arg)) { _PyArg_BadArgument("sem_unlink", "argument", "str", arg); goto exit; } Py_ssize_t name_length; name = PyUnicode_AsUTF8AndSize(arg, &name_length); if (name == NULL) { goto exit; } if (strlen(name) != (size_t)name_length) { PyErr_SetString(PyExc_ValueError, "embedded null character"); goto exit; } return_value = _multiprocessing_sem_unlink_impl(module, name); exit: return return_value; } #ifndef _MULTIPROCESSING_CLOSESOCKET_METHODDEF #define _MULTIPROCESSING_CLOSESOCKET_METHODDEF #endif /* !defined(_MULTIPROCESSING_CLOSESOCKET_METHODDEF) */ #ifndef _MULTIPROCESSING_RECV_METHODDEF #define _MULTIPROCESSING_RECV_METHODDEF #endif /* !defined(_MULTIPROCESSING_RECV_METHODDEF) */ #ifndef _MULTIPROCESSING_SEND_METHODDEF #define _MULTIPROCESSING_SEND_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEND_METHODDEF) */ /*[clinic end generated code: output=4a6afc67c1f5ec85 input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.12/Modules/_multiprocess/clinic/posixshmem.c.h000066400000000000000000000120111455552142400305760ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) # include "pycore_gc.h" // PyGC_Head # include "pycore_runtime.h" // _Py_ID() #endif #if defined(HAVE_SHM_OPEN) PyDoc_STRVAR(_posixshmem_shm_open__doc__, "shm_open($module, /, path, flags, mode=511)\n" "--\n" "\n" "Open a shared memory object. 
Returns a file descriptor (integer)."); #define _POSIXSHMEM_SHM_OPEN_METHODDEF \ {"shm_open", _PyCFunction_CAST(_posixshmem_shm_open), METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_open__doc__}, static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode); static PyObject * _posixshmem_shm_open(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) #define NUM_KEYWORDS 3 static struct { PyGC_Head _this_is_not_used; PyObject_VAR_HEAD PyObject *ob_item[NUM_KEYWORDS]; } _kwtuple = { .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) .ob_item = { &_Py_ID(path), &_Py_ID(flags), &_Py_ID(mode), }, }; #undef NUM_KEYWORDS #define KWTUPLE (&_kwtuple.ob_base.ob_base) #else // !Py_BUILD_CORE # define KWTUPLE NULL #endif // !Py_BUILD_CORE static const char * const _keywords[] = {"path", "flags", "mode", NULL}; static _PyArg_Parser _parser = { .keywords = _keywords, .fname = "shm_open", .kwtuple = KWTUPLE, }; #undef KWTUPLE PyObject *argsbuf[3]; Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 2; PyObject *path; int flags; int mode = 511; int _return_value; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 2, 3, 0, argsbuf); if (!args) { goto exit; } if (!PyUnicode_Check(args[0])) { _PyArg_BadArgument("shm_open", "argument 'path'", "str", args[0]); goto exit; } if (PyUnicode_READY(args[0]) == -1) { goto exit; } path = args[0]; flags = _PyLong_AsInt(args[1]); if (flags == -1 && PyErr_Occurred()) { goto exit; } if (!noptargs) { goto skip_optional_pos; } mode = _PyLong_AsInt(args[2]); if (mode == -1 && PyErr_Occurred()) { goto exit; } skip_optional_pos: _return_value = _posixshmem_shm_open_impl(module, path, flags, mode); if ((_return_value == -1) && PyErr_Occurred()) { goto exit; } return_value = PyLong_FromLong((long)_return_value); exit: return return_value; } #endif /* defined(HAVE_SHM_OPEN) */ #if defined(HAVE_SHM_UNLINK) PyDoc_STRVAR(_posixshmem_shm_unlink__doc__, "shm_unlink($module, /, path)\n" "--\n" "\n" "Remove a shared memory object (similar to unlink()).\n" "\n" "Remove a shared memory object name, and, once all processes have unmapped\n" "the object, de-allocates and destroys the contents of the associated memory\n" "region."); #define _POSIXSHMEM_SHM_UNLINK_METHODDEF \ {"shm_unlink", _PyCFunction_CAST(_posixshmem_shm_unlink), METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_unlink__doc__}, static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path); static PyObject * _posixshmem_shm_unlink(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) #define NUM_KEYWORDS 1 static struct { PyGC_Head _this_is_not_used; PyObject_VAR_HEAD PyObject *ob_item[NUM_KEYWORDS]; } _kwtuple = { .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) .ob_item = { &_Py_ID(path), }, }; #undef NUM_KEYWORDS #define KWTUPLE (&_kwtuple.ob_base.ob_base) #else // !Py_BUILD_CORE # define KWTUPLE NULL #endif // !Py_BUILD_CORE static const char * const _keywords[] = {"path", NULL}; static _PyArg_Parser _parser = { .keywords = _keywords, .fname = "shm_unlink", .kwtuple = KWTUPLE, }; #undef KWTUPLE PyObject *argsbuf[1]; PyObject *path; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf); if (!args) { goto exit; } if (!PyUnicode_Check(args[0])) { 
_PyArg_BadArgument("shm_unlink", "argument 'path'", "str", args[0]); goto exit; } if (PyUnicode_READY(args[0]) == -1) { goto exit; } path = args[0]; return_value = _posixshmem_shm_unlink_impl(module, path); exit: return return_value; } #endif /* defined(HAVE_SHM_UNLINK) */ #ifndef _POSIXSHMEM_SHM_OPEN_METHODDEF #define _POSIXSHMEM_SHM_OPEN_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_OPEN_METHODDEF) */ #ifndef _POSIXSHMEM_SHM_UNLINK_METHODDEF #define _POSIXSHMEM_SHM_UNLINK_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_UNLINK_METHODDEF) */ /*[clinic end generated code: output=3f6fee283d5fd0e9 input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.12/Modules/_multiprocess/clinic/semaphore.c.h000066400000000000000000000405721455552142400304020ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) # include "pycore_gc.h" // PyGC_Head # include "pycore_runtime.h" // _Py_ID() #endif #if defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_acquire__doc__, "acquire($self, /, block=True, timeout=None)\n" "--\n" "\n" "Acquire the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF \ {"acquire", _PyCFunction_CAST(_multiprocessing_SemLock_acquire), METH_FASTCALL|METH_KEYWORDS, _multiprocessing_SemLock_acquire__doc__}, static PyObject * _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj); static PyObject * _multiprocessing_SemLock_acquire(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) #define NUM_KEYWORDS 2 static struct { PyGC_Head _this_is_not_used; PyObject_VAR_HEAD PyObject *ob_item[NUM_KEYWORDS]; } _kwtuple = { .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) .ob_item = { &_Py_ID(block), &_Py_ID(timeout), }, }; #undef NUM_KEYWORDS #define KWTUPLE (&_kwtuple.ob_base.ob_base) #else // !Py_BUILD_CORE # define KWTUPLE NULL #endif // !Py_BUILD_CORE static const char * const _keywords[] = {"block", "timeout", NULL}; static _PyArg_Parser _parser = { .keywords = _keywords, .fname = "acquire", .kwtuple = KWTUPLE, }; #undef KWTUPLE PyObject *argsbuf[2]; Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 0; int blocking = 1; PyObject *timeout_obj = Py_None; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 2, 0, argsbuf); if (!args) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (args[0]) { blocking = PyObject_IsTrue(args[0]); if (blocking < 0) { goto exit; } if (!--noptargs) { goto skip_optional_pos; } } timeout_obj = args[1]; skip_optional_pos: return_value = _multiprocessing_SemLock_acquire_impl(self, blocking, timeout_obj); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_release__doc__, "release($self, /)\n" "--\n" "\n" "Release the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF \ {"release", (PyCFunction)_multiprocessing_SemLock_release, METH_NOARGS, _multiprocessing_SemLock_release__doc__}, static PyObject * _multiprocessing_SemLock_release_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock_release(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock_release_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_acquire__doc__, "acquire($self, /, block=True, timeout=None)\n" "--\n" "\n" "Acquire the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF \ {"acquire", _PyCFunction_CAST(_multiprocessing_SemLock_acquire), METH_FASTCALL|METH_KEYWORDS, _multiprocessing_SemLock_acquire__doc__}, static PyObject * _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj); static PyObject * _multiprocessing_SemLock_acquire(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) #define NUM_KEYWORDS 2 static struct { PyGC_Head _this_is_not_used; PyObject_VAR_HEAD PyObject *ob_item[NUM_KEYWORDS]; } _kwtuple = { .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) .ob_item = { &_Py_ID(block), &_Py_ID(timeout), }, }; #undef NUM_KEYWORDS #define KWTUPLE (&_kwtuple.ob_base.ob_base) #else // !Py_BUILD_CORE # define KWTUPLE NULL #endif // !Py_BUILD_CORE static const char * const _keywords[] = {"block", "timeout", NULL}; static _PyArg_Parser _parser = { .keywords = _keywords, .fname = "acquire", .kwtuple = KWTUPLE, }; #undef KWTUPLE PyObject *argsbuf[2]; Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 0; int blocking = 1; PyObject *timeout_obj = Py_None; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 2, 0, argsbuf); if (!args) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (args[0]) { blocking = PyObject_IsTrue(args[0]); if (blocking < 0) { goto exit; } if (!--noptargs) { goto skip_optional_pos; } } timeout_obj = args[1]; skip_optional_pos: return_value = _multiprocessing_SemLock_acquire_impl(self, blocking, timeout_obj); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_release__doc__, "release($self, /)\n" "--\n" "\n" "Release the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF \ {"release", (PyCFunction)_multiprocessing_SemLock_release, METH_NOARGS, _multiprocessing_SemLock_release__doc__}, static PyObject * _multiprocessing_SemLock_release_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock_release(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock_release_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) static PyObject * _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, int maxvalue, const char *name, int unlink); static PyObject * _multiprocessing_SemLock(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) #define NUM_KEYWORDS 5 static struct { PyGC_Head _this_is_not_used; PyObject_VAR_HEAD PyObject *ob_item[NUM_KEYWORDS]; } _kwtuple = { .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) .ob_item = { &_Py_ID(kind), &_Py_ID(value), &_Py_ID(maxvalue), &_Py_ID(name), &_Py_ID(unlink), }, }; #undef NUM_KEYWORDS #define KWTUPLE (&_kwtuple.ob_base.ob_base) #else // !Py_BUILD_CORE # define KWTUPLE NULL #endif // !Py_BUILD_CORE static const char * const _keywords[] = {"kind", "value", "maxvalue", "name", "unlink", NULL}; static _PyArg_Parser _parser = { .keywords = _keywords, .fname = "SemLock", .kwtuple = KWTUPLE, }; #undef KWTUPLE PyObject *argsbuf[5]; PyObject * const *fastargs; Py_ssize_t nargs = PyTuple_GET_SIZE(args); int kind; int value; int maxvalue; const char *name; int unlink; fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 5, 5, 0, argsbuf); if (!fastargs) { goto exit; } kind = _PyLong_AsInt(fastargs[0]); if (kind == -1 && PyErr_Occurred()) { goto exit; } value = _PyLong_AsInt(fastargs[1]); if (value == -1 && PyErr_Occurred()) { goto exit; } maxvalue = _PyLong_AsInt(fastargs[2]); if (maxvalue == -1 && PyErr_Occurred()) { goto exit; } if (!PyUnicode_Check(fastargs[3])) { _PyArg_BadArgument("SemLock", "argument 'name'", "str", fastargs[3]); goto exit; } Py_ssize_t name_length; name = PyUnicode_AsUTF8AndSize(fastargs[3], &name_length); if (name == NULL) { goto exit; } if (strlen(name) != (size_t)name_length) { PyErr_SetString(PyExc_ValueError, "embedded null character"); goto exit; } unlink = PyObject_IsTrue(fastargs[4]); if (unlink < 0) { goto exit; } return_value = _multiprocessing_SemLock_impl(type, kind, value, maxvalue, name, unlink); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__rebuild__doc__, "_rebuild($type, handle, kind, maxvalue, name, /)\n" "--\n" "\n"); #define 
_MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF \ {"_rebuild", _PyCFunction_CAST(_multiprocessing_SemLock__rebuild), METH_FASTCALL|METH_CLASS, _multiprocessing_SemLock__rebuild__doc__}, static PyObject * _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, const char *name); static PyObject * _multiprocessing_SemLock__rebuild(PyTypeObject *type, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; SEM_HANDLE handle; int kind; int maxvalue; const char *name; if (!_PyArg_ParseStack(args, nargs, ""F_SEM_HANDLE"iiz:_rebuild", &handle, &kind, &maxvalue, &name)) { goto exit; } return_value = _multiprocessing_SemLock__rebuild_impl(type, handle, kind, maxvalue, name); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__count__doc__, "_count($self, /)\n" "--\n" "\n" "Num of `acquire()`s minus num of `release()`s for this process."); #define _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF \ {"_count", (PyCFunction)_multiprocessing_SemLock__count, METH_NOARGS, _multiprocessing_SemLock__count__doc__}, static PyObject * _multiprocessing_SemLock__count_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__count(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__count_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__is_mine__doc__, "_is_mine($self, /)\n" "--\n" "\n" "Whether the lock is owned by this thread."); #define _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF \ {"_is_mine", (PyCFunction)_multiprocessing_SemLock__is_mine, METH_NOARGS, _multiprocessing_SemLock__is_mine__doc__}, static PyObject * _multiprocessing_SemLock__is_mine_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__is_mine(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__is_mine_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__get_value__doc__, "_get_value($self, /)\n" "--\n" "\n" "Get the value of the semaphore."); #define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF \ {"_get_value", (PyCFunction)_multiprocessing_SemLock__get_value, METH_NOARGS, _multiprocessing_SemLock__get_value__doc__}, static PyObject * _multiprocessing_SemLock__get_value_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__get_value(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__get_value_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__is_zero__doc__, "_is_zero($self, /)\n" "--\n" "\n" "Return whether semaphore has value zero."); #define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF \ {"_is_zero", (PyCFunction)_multiprocessing_SemLock__is_zero, METH_NOARGS, _multiprocessing_SemLock__is_zero__doc__}, static PyObject * _multiprocessing_SemLock__is_zero_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__is_zero(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__is_zero_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__after_fork__doc__, "_after_fork($self, /)\n" "--\n" "\n" "Rezero the net acquisition count after fork()."); #define _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF \ {"_after_fork", 
(PyCFunction)_multiprocessing_SemLock__after_fork, METH_NOARGS, _multiprocessing_SemLock__after_fork__doc__}, static PyObject * _multiprocessing_SemLock__after_fork_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__after_fork(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__after_fork_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock___enter____doc__, "__enter__($self, /)\n" "--\n" "\n" "Enter the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF \ {"__enter__", (PyCFunction)_multiprocessing_SemLock___enter__, METH_NOARGS, _multiprocessing_SemLock___enter____doc__}, static PyObject * _multiprocessing_SemLock___enter___impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock___enter__(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock___enter___impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock___exit____doc__, "__exit__($self, exc_type=None, exc_value=None, exc_tb=None, /)\n" "--\n" "\n" "Exit the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF \ {"__exit__", _PyCFunction_CAST(_multiprocessing_SemLock___exit__), METH_FASTCALL, _multiprocessing_SemLock___exit____doc__}, static PyObject * _multiprocessing_SemLock___exit___impl(SemLockObject *self, PyObject *exc_type, PyObject *exc_value, PyObject *exc_tb); static PyObject * _multiprocessing_SemLock___exit__(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; PyObject *exc_type = Py_None; PyObject *exc_value = Py_None; PyObject *exc_tb = Py_None; if (!_PyArg_CheckPositional("__exit__", nargs, 0, 3)) { goto exit; } if (nargs < 1) { goto skip_optional; } exc_type = args[0]; if (nargs < 2) { goto skip_optional; } exc_value = args[1]; if (nargs < 3) { goto skip_optional; } exc_tb = args[2]; skip_optional: return_value = _multiprocessing_SemLock___exit___impl(self, exc_type, exc_value, exc_tb); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #ifndef _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF #define _MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF #define _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF #define _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF #define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF #define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF #define _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF) */ #ifndef 
_MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF #define _MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF #define _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF) */ /*[clinic end generated code: output=dae57a702cc01512 input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.12/Modules/_multiprocess/multiprocess.c000066400000000000000000000162541455552142400274610ustar00rootroot00000000000000/* * Extension module used by multiprocess package * * multiprocess.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocess.h" /*[python input] class HANDLE_converter(CConverter): type = "HANDLE" format_unit = '"F_HANDLE"' def parse_arg(self, argname, displayname): return """ {paramname} = PyLong_AsVoidPtr({argname}); if (!{paramname} && PyErr_Occurred()) {{{{ goto exit; }}}} """.format(argname=argname, paramname=self.parser_name) [python start generated code]*/ /*[python end generated code: output=da39a3ee5e6b4b0d input=3e537d244034affb]*/ /*[clinic input] module _multiprocess [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=01e0745f380ac6e3]*/ #include "clinic/multiprocessing.c.h" /* * Function which raises exceptions based on error codes */ PyObject * _PyMp_SetError(PyObject *Type, int num) { switch (num) { #ifdef MS_WINDOWS case MP_STANDARD_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, 0); break; case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, WSAGetLastError()); break; #else /* !MS_WINDOWS */ case MP_STANDARD_ERROR: case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetFromErrno(Type); break; #endif /* !MS_WINDOWS */ case MP_MEMORY_ERROR: PyErr_NoMemory(); break; case MP_EXCEPTION_HAS_BEEN_SET: break; default: PyErr_Format(PyExc_RuntimeError, "unknown error number %d", num); } return NULL; } #ifdef MS_WINDOWS /*[clinic input] _multiprocess.closesocket handle: HANDLE / [clinic start generated code]*/ static PyObject * _multiprocess_closesocket_impl(PyObject *module, HANDLE handle) /*[clinic end generated code: output=214f359f900966f4 input=8a20706dd386c6cc]*/ { int ret; Py_BEGIN_ALLOW_THREADS ret = closesocket((SOCKET) handle); Py_END_ALLOW_THREADS if (ret) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); Py_RETURN_NONE; } /*[clinic input] _multiprocess.recv handle: HANDLE size: int / [clinic start generated code]*/ static PyObject * _multiprocess_recv_impl(PyObject *module, HANDLE handle, int size) /*[clinic end generated code: output=92322781ba9ff598 input=6a5b0834372cee5b]*/ { int nread; PyObject *buf; buf = PyBytes_FromStringAndSize(NULL, size); if (!buf) return NULL; Py_BEGIN_ALLOW_THREADS nread = recv((SOCKET) handle, PyBytes_AS_STRING(buf), size, 0); Py_END_ALLOW_THREADS if (nread < 0) { Py_DECREF(buf); return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); } _PyBytes_Resize(&buf, nread); return buf; } /*[clinic input] _multiprocess.send handle: HANDLE buf: Py_buffer / [clinic start generated code]*/ static PyObject * _multiprocess_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf) /*[clinic end generated code: output=52d7df0519c596cb input=41dce742f98d2210]*/ { int ret, length; length = (int)Py_MIN(buf->len, INT_MAX); Py_BEGIN_ALLOW_THREADS ret = send((SOCKET) handle, 
buf->buf, length, 0); Py_END_ALLOW_THREADS if (ret < 0) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); return PyLong_FromLong(ret); } #endif /*[clinic input] _multiprocess.sem_unlink name: str / [clinic start generated code]*/ static PyObject * _multiprocess_sem_unlink_impl(PyObject *module, const char *name) /*[clinic end generated code: output=fcbfeb1ed255e647 input=bf939aff9564f1d5]*/ { return _PyMp_sem_unlink(name); } /* * Function table */ static PyMethodDef module_methods[] = { #ifdef MS_WINDOWS _MULTIPROCESSING_CLOSESOCKET_METHODDEF _MULTIPROCESSING_RECV_METHODDEF _MULTIPROCESSING_SEND_METHODDEF #endif #if !defined(POSIX_SEMAPHORES_NOT_ENABLED) && !defined(__ANDROID__) _MULTIPROCESSING_SEM_UNLINK_METHODDEF #endif {NULL} }; /* * Initialize */ static int multiprocess_exec(PyObject *module) { #ifdef HAVE_MP_SEMAPHORE PyTypeObject *semlock_type = (PyTypeObject *)PyType_FromModuleAndSpec( module, &_PyMp_SemLockType_spec, NULL); if (semlock_type == NULL) { return -1; } int rc = PyModule_AddType(module, semlock_type); Py_DECREF(semlock_type); if (rc < 0) { return -1; } PyObject *py_sem_value_max; /* Some systems define SEM_VALUE_MAX as an unsigned value that * causes it to be negative when used as an int (NetBSD). * * Issue #28152: Use (0) instead of 0 to fix a warning on dead code * when using clang -Wunreachable-code. */ if ((int)(SEM_VALUE_MAX) < (0)) { py_sem_value_max = PyLong_FromLong(INT_MAX); } else { py_sem_value_max = PyLong_FromLong(SEM_VALUE_MAX); } if (py_sem_value_max == NULL) { return -1; } if (PyDict_SetItemString(semlock_type->tp_dict, "SEM_VALUE_MAX", py_sem_value_max) < 0) { Py_DECREF(py_sem_value_max); return -1; } Py_DECREF(py_sem_value_max); #endif /* Add configuration macros */ PyObject *flags = PyDict_New(); if (!flags) { return -1; } #define ADD_FLAG(name) \ do { \ PyObject *value = PyLong_FromLong(name); \ if (value == NULL) { \ Py_DECREF(flags); \ return -1; \ } \ if (PyDict_SetItemString(flags, #name, value) < 0) { \ Py_DECREF(flags); \ Py_DECREF(value); \ return -1; \ } \ Py_DECREF(value); \ } while (0) #if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) ADD_FLAG(HAVE_SEM_OPEN); #endif #ifdef HAVE_SEM_TIMEDWAIT ADD_FLAG(HAVE_SEM_TIMEDWAIT); #endif #ifdef HAVE_BROKEN_SEM_GETVALUE ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE); #endif #ifdef HAVE_BROKEN_SEM_UNLINK ADD_FLAG(HAVE_BROKEN_SEM_UNLINK); #endif if (PyModule_AddObject(module, "flags", flags) < 0) { Py_DECREF(flags); return -1; } return 0; } static PyModuleDef_Slot multiprocess_slots[] = { {Py_mod_exec, multiprocess_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {0, NULL} }; static struct PyModuleDef multiprocess_module = { PyModuleDef_HEAD_INIT, .m_name = "_multiprocess", .m_size = 0, .m_methods = module_methods, .m_slots = multiprocess_slots, }; PyMODINIT_FUNC PyInit__multiprocess(void) { return PyModuleDef_Init(&multiprocess_module); } uqfoundation-multiprocess-b3457a5/py3.12/Modules/_multiprocess/multiprocess.h000066400000000000000000000043051455552142400274600ustar00rootroot00000000000000#ifndef MULTIPROCESS_H #define MULTIPROCESS_H #define PY_SSIZE_T_CLEAN #include "Python.h" #include "structmember.h" #include "pythread.h" /* * Platform includes and definitions */ #ifdef MS_WINDOWS # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN # endif # include # include # include /* getpid() */ # ifdef Py_DEBUG # include # endif # define SEM_HANDLE HANDLE # define SEM_VALUE_MAX LONG_MAX # define HAVE_MP_SEMAPHORE #else # include /* O_CREAT and 
O_EXCL */ # if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) # define HAVE_MP_SEMAPHORE # include typedef sem_t *SEM_HANDLE; # endif #endif /* * Issue 3110 - Solaris does not define SEM_VALUE_MAX */ #ifndef SEM_VALUE_MAX #if defined(HAVE_SYSCONF) && defined(_SC_SEM_VALUE_MAX) # define SEM_VALUE_MAX sysconf(_SC_SEM_VALUE_MAX) #elif defined(_SEM_VALUE_MAX) # define SEM_VALUE_MAX _SEM_VALUE_MAX #elif defined(_POSIX_SEM_VALUE_MAX) # define SEM_VALUE_MAX _POSIX_SEM_VALUE_MAX #else # define SEM_VALUE_MAX INT_MAX #endif #endif /* * Format codes */ #if SIZEOF_VOID_P == SIZEOF_LONG # define F_POINTER "k" # define T_POINTER T_ULONG #elif SIZEOF_VOID_P == SIZEOF_LONG_LONG # define F_POINTER "K" # define T_POINTER T_ULONGLONG #else # error "can't find format code for unsigned integer of same size as void*" #endif #ifdef MS_WINDOWS # define F_HANDLE F_POINTER # define T_HANDLE T_POINTER # define F_SEM_HANDLE F_HANDLE # define T_SEM_HANDLE T_HANDLE #else # define F_HANDLE "i" # define T_HANDLE T_INT # define F_SEM_HANDLE F_POINTER # define T_SEM_HANDLE T_POINTER #endif /* * Error codes which can be returned by functions called without GIL */ #define MP_SUCCESS (0) #define MP_STANDARD_ERROR (-1) #define MP_MEMORY_ERROR (-1001) #define MP_SOCKET_ERROR (-1002) #define MP_EXCEPTION_HAS_BEEN_SET (-1003) PyObject *_PyMp_SetError(PyObject *Type, int num); /* * Externs - not all will really exist on all platforms */ extern PyType_Spec _PyMp_SemLockType_spec; extern PyObject *_PyMp_sem_unlink(const char *name); #endif /* MULTIPROCESS_H */ uqfoundation-multiprocess-b3457a5/py3.12/Modules/_multiprocess/posixshmem.c000066400000000000000000000057541455552142400271270ustar00rootroot00000000000000/* posixshmem - A Python extension that provides shm_open() and shm_unlink() */ #define PY_SSIZE_T_CLEAN #include // for shm_open() and shm_unlink() #ifdef HAVE_SYS_MMAN_H #include #endif /*[clinic input] module _posixshmem [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=a416734e49164bf8]*/ /* * * Module-level functions & meta stuff * */ #ifdef HAVE_SHM_OPEN /*[clinic input] _posixshmem.shm_open -> int path: unicode flags: int mode: int = 0o777 # "shm_open(path, flags, mode=0o777)\n\n\ Open a shared memory object. Returns a file descriptor (integer). [clinic start generated code]*/ static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode) /*[clinic end generated code: output=8d110171a4fa20df input=e83b58fa802fac25]*/ { int fd; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return -1; } do { Py_BEGIN_ALLOW_THREADS fd = shm_open(name, flags, mode); Py_END_ALLOW_THREADS } while (fd < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (fd < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return -1; } return fd; } #endif /* HAVE_SHM_OPEN */ #ifdef HAVE_SHM_UNLINK /*[clinic input] _posixshmem.shm_unlink path: unicode Remove a shared memory object (similar to unlink()). Remove a shared memory object name, and, once all processes have unmapped the object, de-allocates and destroys the contents of the associated memory region. 
[clinic start generated code]*/ static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path) /*[clinic end generated code: output=42f8b23d134b9ff5 input=8dc0f87143e3b300]*/ { int rv; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return NULL; } do { Py_BEGIN_ALLOW_THREADS rv = shm_unlink(name); Py_END_ALLOW_THREADS } while (rv < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (rv < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return NULL; } Py_RETURN_NONE; } #endif /* HAVE_SHM_UNLINK */ #include "clinic/posixshmem.c.h" static PyMethodDef module_methods[ ] = { _POSIXSHMEM_SHM_OPEN_METHODDEF _POSIXSHMEM_SHM_UNLINK_METHODDEF {NULL} /* Sentinel */ }; static PyModuleDef_Slot module_slots[] = { {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {0, NULL} }; static struct PyModuleDef _posixshmemmodule = { PyModuleDef_HEAD_INIT, .m_name = "_posixshmem", .m_doc = "POSIX shared memory module", .m_size = 0, .m_methods = module_methods, .m_slots = module_slots, }; /* Module init function */ PyMODINIT_FUNC PyInit__posixshmem(void) { return PyModuleDef_Init(&_posixshmemmodule); } uqfoundation-multiprocess-b3457a5/py3.12/Modules/_multiprocess/semaphore.c000066400000000000000000000512601455552142400267070ustar00rootroot00000000000000/* * A type which wraps a semaphore * * semaphore.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocess.h" #ifdef HAVE_MP_SEMAPHORE enum { RECURSIVE_MUTEX, SEMAPHORE }; typedef struct { PyObject_HEAD SEM_HANDLE handle; unsigned long last_tid; int count; int maxvalue; int kind; char *name; } SemLockObject; /*[python input] class SEM_HANDLE_converter(CConverter): type = "SEM_HANDLE" format_unit = '"F_SEM_HANDLE"' [python start generated code]*/ /*[python end generated code: output=da39a3ee5e6b4b0d input=3e0ad43e482d8716]*/ /*[clinic input] module _multiprocess class _multiprocess.SemLock "SemLockObject *" "&_PyMp_SemLockType" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=935fb41b7d032599]*/ #include "clinic/semaphore.c.h" #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) #ifdef MS_WINDOWS /* * Windows definitions */ #define SEM_FAILED NULL #define SEM_CLEAR_ERROR() SetLastError(0) #define SEM_GET_LAST_ERROR() GetLastError() #define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL) #define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1) #define SEM_GETVALUE(sem, pval) _GetSemaphoreValue(sem, pval) #define SEM_UNLINK(name) 0 static int _GetSemaphoreValue(HANDLE handle, long *value) { long previous; switch (WaitForSingleObjectEx(handle, 0, FALSE)) { case WAIT_OBJECT_0: if (!ReleaseSemaphore(handle, 1, &previous)) return MP_STANDARD_ERROR; *value = previous + 1; return 0; case WAIT_TIMEOUT: *value = 0; return 0; default: return MP_STANDARD_ERROR; } } /*[clinic input] _multiprocess.SemLock.acquire block as blocking: bool = True timeout as timeout_obj: object = None Acquire the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj) /*[clinic end generated code: output=f9998f0b6b0b0872 input=e5b45f5cbb775166]*/ { double timeout; DWORD res, full_msecs, nhandles; HANDLE handles[2], sigint_event; /* calculate timeout */ if (!blocking) { full_msecs = 0; } else if (timeout_obj == Py_None) { full_msecs = INFINITE; } else { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; timeout *= 1000.0; /* convert to millisecs */ if (timeout < 0.0) { timeout = 0.0; } else if (timeout >= 0.5 * INFINITE) { /* 25 days */ PyErr_SetString(PyExc_OverflowError, "timeout is too large"); return NULL; } full_msecs = (DWORD)(timeout + 0.5); } /* check whether we already own the lock */ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } /* check whether we can acquire without releasing the GIL and blocking */ if (WaitForSingleObjectEx(self->handle, 0, FALSE) == WAIT_OBJECT_0) { self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; } /* prepare list of handles */ nhandles = 0; handles[nhandles++] = self->handle; if (_PyOS_IsMainThread()) { sigint_event = _PyOS_SigintEvent(); assert(sigint_event != NULL); handles[nhandles++] = sigint_event; } else { sigint_event = NULL; } /* do the wait */ Py_BEGIN_ALLOW_THREADS if (sigint_event != NULL) ResetEvent(sigint_event); res = WaitForMultipleObjectsEx(nhandles, handles, FALSE, full_msecs, FALSE); Py_END_ALLOW_THREADS /* handle result */ switch (res) { case WAIT_TIMEOUT: Py_RETURN_FALSE; case WAIT_OBJECT_0 + 0: self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; case WAIT_OBJECT_0 + 1: errno = EINTR; return PyErr_SetFromErrno(PyExc_OSError); case WAIT_FAILED: return PyErr_SetFromWindowsErr(0); default: PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or " "WaitForMultipleObjects() gave unrecognized " "value %u", res); return NULL; } } /*[clinic input] _multiprocess.SemLock.release Release the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_release_impl(SemLockObject *self) /*[clinic end generated code: output=b22f53ba96b0d1db input=ba7e63a961885d3d]*/ { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } if (!ReleaseSemaphore(self->handle, 1, NULL)) { if (GetLastError() == ERROR_TOO_MANY_POSTS) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } else { return PyErr_SetFromWindowsErr(0); } } --self->count; Py_RETURN_NONE; } #else /* !MS_WINDOWS */ /* * Unix definitions */ #define SEM_CLEAR_ERROR() #define SEM_GET_LAST_ERROR() 0 #define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val) #define SEM_CLOSE(sem) sem_close(sem) #define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval) #define SEM_UNLINK(name) sem_unlink(name) /* OS X 10.4 defines SEM_FAILED as -1 instead of (sem_t *)-1; this gives compiler warnings, and (potentially) undefined behaviour. 
*/ #ifdef __APPLE__ # undef SEM_FAILED # define SEM_FAILED ((sem_t *)-1) #endif #ifndef HAVE_SEM_UNLINK # define sem_unlink(name) 0 #endif // ifndef HAVE_SEM_TIMEDWAIT # define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save) static int sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) { int res; unsigned long delay, difference; struct timeval now, tvdeadline, tvdelay; errno = 0; tvdeadline.tv_sec = deadline->tv_sec; tvdeadline.tv_usec = deadline->tv_nsec / 1000; for (delay = 0 ; ; delay += 1000) { /* poll */ if (sem_trywait(sem) == 0) return 0; else if (errno != EAGAIN) return MP_STANDARD_ERROR; /* get current time */ if (gettimeofday(&now, NULL) < 0) return MP_STANDARD_ERROR; /* check for timeout */ if (tvdeadline.tv_sec < now.tv_sec || (tvdeadline.tv_sec == now.tv_sec && tvdeadline.tv_usec <= now.tv_usec)) { errno = ETIMEDOUT; return MP_STANDARD_ERROR; } /* calculate how much time is left */ difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 + (tvdeadline.tv_usec - now.tv_usec); /* check delay not too long -- maximum is 20 msecs */ if (delay > 20000) delay = 20000; if (delay > difference) delay = difference; /* sleep */ tvdelay.tv_sec = delay / 1000000; tvdelay.tv_usec = delay % 1000000; if (select(0, NULL, NULL, NULL, &tvdelay) < 0) return MP_STANDARD_ERROR; /* check for signals */ Py_BLOCK_THREADS res = PyErr_CheckSignals(); Py_UNBLOCK_THREADS if (res) { errno = EINTR; return MP_EXCEPTION_HAS_BEEN_SET; } } } // #endif /* !HAVE_SEM_TIMEDWAIT */ /*[clinic input] _multiprocess.SemLock.acquire block as blocking: bool = True timeout as timeout_obj: object = None Acquire the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj) /*[clinic end generated code: output=f9998f0b6b0b0872 input=e5b45f5cbb775166]*/ { int res, err = 0; struct timespec deadline = {0}; if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } int use_deadline = (timeout_obj != Py_None); if (use_deadline) { double timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) { return NULL; } if (timeout < 0.0) { timeout = 0.0; } struct timeval now; if (gettimeofday(&now, NULL) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } long sec = (long) timeout; long nsec = (long) (1e9 * (timeout - sec) + 0.5); deadline.tv_sec = now.tv_sec + sec; deadline.tv_nsec = now.tv_usec * 1000 + nsec; deadline.tv_sec += (deadline.tv_nsec / 1000000000); deadline.tv_nsec %= 1000000000; } /* Check whether we can acquire without releasing the GIL and blocking */ do { res = sem_trywait(self->handle); err = errno; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); errno = err; if (res < 0 && errno == EAGAIN && blocking) { /* Couldn't acquire immediately, need to block */ do { Py_BEGIN_ALLOW_THREADS if (!use_deadline) { res = sem_wait(self->handle); } else { res = sem_timedwait(self->handle, &deadline); } Py_END_ALLOW_THREADS err = errno; if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); } if (res < 0) { errno = err; if (errno == EAGAIN || errno == ETIMEDOUT) Py_RETURN_FALSE; else if (errno == EINTR) return NULL; else return PyErr_SetFromErrno(PyExc_OSError); } ++self->count; self->last_tid = PyThread_get_thread_ident(); Py_RETURN_TRUE; } /*[clinic input] _multiprocess.SemLock.release Release the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock_release_impl(SemLockObject *self) /*[clinic end generated code: output=b22f53ba96b0d1db input=ba7e63a961885d3d]*/ { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } else { #ifdef HAVE_BROKEN_SEM_GETVALUE /* We will only check properly the maxvalue == 1 case */ if (self->maxvalue == 1) { /* make sure that already locked */ if (sem_trywait(self->handle) < 0) { if (errno != EAGAIN) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } /* it is already locked as expected */ } else { /* it was not locked so undo wait and raise */ if (sem_post(self->handle) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } PyErr_SetString(PyExc_ValueError, "semaphore " "or lock released too many " "times"); return NULL; } } #else int sval; /* This check is not an absolute guarantee that the semaphore does not rise above maxvalue. */ if (sem_getvalue(self->handle, &sval) < 0) { return PyErr_SetFromErrno(PyExc_OSError); } else if (sval >= self->maxvalue) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } #endif } if (sem_post(self->handle) < 0) return PyErr_SetFromErrno(PyExc_OSError); --self->count; Py_RETURN_NONE; } #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject * newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, char *name) { SemLockObject *self = (SemLockObject *)type->tp_alloc(type, 0); if (!self) return NULL; self->handle = handle; self->kind = kind; self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; return (PyObject*)self; } /*[clinic input] @classmethod _multiprocess.SemLock.__new__ kind: int value: int maxvalue: int name: str unlink: bool [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_impl(PyTypeObject *type, int kind, int value, int maxvalue, const char *name, int unlink) /*[clinic end generated code: output=30727e38f5f7577a input=fdaeb69814471c5b]*/ { SEM_HANDLE handle = SEM_FAILED; PyObject *result; char *name_copy = NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { PyErr_SetString(PyExc_ValueError, "unrecognized kind"); return NULL; } if (!unlink) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) { return PyErr_NoMemory(); } strcpy(name_copy, name); } SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; if (unlink && SEM_UNLINK(name) < 0) goto failure; result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; return result; failure: if (!PyErr_Occurred()) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); } if (handle != SEM_FAILED) SEM_CLOSE(handle); PyMem_Free(name_copy); return NULL; } /*[clinic input] @classmethod _multiprocess.SemLock._rebuild handle: SEM_HANDLE kind: int maxvalue: int name: str(accept={str, NoneType}) / [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, const char *name) /*[clinic end generated code: output=2aaee14f063f3bd9 input=f7040492ac6d9962]*/ { char *name_copy = NULL; if (name != NULL) { name_copy = PyMem_Malloc(strlen(name) + 1); if 
(name_copy == NULL) return PyErr_NoMemory(); strcpy(name_copy, name); } #ifndef MS_WINDOWS if (name != NULL) { handle = sem_open(name, 0); if (handle == SEM_FAILED) { PyErr_SetFromErrno(PyExc_OSError); PyMem_Free(name_copy); return NULL; } } #endif return newsemlockobject(type, handle, kind, maxvalue, name_copy); } static void semlock_dealloc(SemLockObject* self) { PyTypeObject *tp = Py_TYPE(self); PyObject_GC_UnTrack(self); if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); PyMem_Free(self->name); tp->tp_free(self); Py_DECREF(tp); } /*[clinic input] _multiprocess.SemLock._count Num of `acquire()`s minus num of `release()`s for this process. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__count_impl(SemLockObject *self) /*[clinic end generated code: output=5ba8213900e517bb input=36fc59b1cd1025ab]*/ { return PyLong_FromLong((long)self->count); } /*[clinic input] _multiprocess.SemLock._is_mine Whether the lock is owned by this thread. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__is_mine_impl(SemLockObject *self) /*[clinic end generated code: output=92dc98863f4303be input=a96664cb2f0093ba]*/ { /* only makes sense for a lock */ return PyBool_FromLong(ISMINE(self)); } /*[clinic input] _multiprocess.SemLock._get_value Get the value of the semaphore. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__get_value_impl(SemLockObject *self) /*[clinic end generated code: output=64bc1b89bda05e36 input=cb10f9a769836203]*/ { #ifdef HAVE_BROKEN_SEM_GETVALUE PyErr_SetNone(PyExc_NotImplementedError); return NULL; #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); /* some posix implementations use negative numbers to indicate the number of waiting threads */ if (sval < 0) sval = 0; return PyLong_FromLong((long)sval); #endif } /*[clinic input] _multiprocess.SemLock._is_zero Return whether semaphore has value zero. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__is_zero_impl(SemLockObject *self) /*[clinic end generated code: output=815d4c878c806ed7 input=294a446418d31347]*/ { #ifdef HAVE_BROKEN_SEM_GETVALUE if (sem_trywait(self->handle) < 0) { if (errno == EAGAIN) Py_RETURN_TRUE; return _PyMp_SetError(NULL, MP_STANDARD_ERROR); } else { if (sem_post(self->handle) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); Py_RETURN_FALSE; } #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); return PyBool_FromLong((long)sval == 0); #endif } /*[clinic input] _multiprocess.SemLock._after_fork Rezero the net acquisition count after fork(). [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__after_fork_impl(SemLockObject *self) /*[clinic end generated code: output=718bb27914c6a6c1 input=190991008a76621e]*/ { self->count = 0; Py_RETURN_NONE; } /*[clinic input] _multiprocess.SemLock.__enter__ Enter the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock___enter___impl(SemLockObject *self) /*[clinic end generated code: output=beeb2f07c858511f input=c5e27d594284690b]*/ { return _multiprocess_SemLock_acquire_impl(self, 1, Py_None); } /*[clinic input] _multiprocess.SemLock.__exit__ exc_type: object = None exc_value: object = None exc_tb: object = None / Exit the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock___exit___impl(SemLockObject *self, PyObject *exc_type, PyObject *exc_value, PyObject *exc_tb) /*[clinic end generated code: output=3b37c1a9f8b91a03 input=7d644b64a89903f8]*/ { return _multiprocess_SemLock_release_impl(self); } static int semlock_traverse(SemLockObject *s, visitproc visit, void *arg) { Py_VISIT(Py_TYPE(s)); return 0; } /* * Semaphore methods */ static PyMethodDef semlock_methods[] = { _MULTIPROCESS_SEMLOCK_ACQUIRE_METHODDEF _MULTIPROCESS_SEMLOCK_RELEASE_METHODDEF _MULTIPROCESS_SEMLOCK___ENTER___METHODDEF _MULTIPROCESS_SEMLOCK___EXIT___METHODDEF _MULTIPROCESS_SEMLOCK__COUNT_METHODDEF _MULTIPROCESS_SEMLOCK__IS_MINE_METHODDEF _MULTIPROCESS_SEMLOCK__GET_VALUE_METHODDEF _MULTIPROCESS_SEMLOCK__IS_ZERO_METHODDEF _MULTIPROCESS_SEMLOCK__REBUILD_METHODDEF _MULTIPROCESS_SEMLOCK__AFTER_FORK_METHODDEF {NULL} }; /* * Member table */ static PyMemberDef semlock_members[] = { {"handle", T_SEM_HANDLE, offsetof(SemLockObject, handle), READONLY, ""}, {"kind", T_INT, offsetof(SemLockObject, kind), READONLY, ""}, {"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY, ""}, {"name", T_STRING, offsetof(SemLockObject, name), READONLY, ""}, {NULL} }; /* * Semaphore type */ static PyType_Slot _PyMp_SemLockType_slots[] = { {Py_tp_dealloc, semlock_dealloc}, {Py_tp_getattro, PyObject_GenericGetAttr}, {Py_tp_setattro, PyObject_GenericSetAttr}, {Py_tp_methods, semlock_methods}, {Py_tp_members, semlock_members}, {Py_tp_alloc, PyType_GenericAlloc}, {Py_tp_new, _multiprocess_SemLock}, {Py_tp_traverse, semlock_traverse}, {Py_tp_free, PyObject_GC_Del}, {Py_tp_doc, (void *)PyDoc_STR("Semaphore/Mutex type")}, {0, 0}, }; PyType_Spec _PyMp_SemLockType_spec = { .name = "_multiprocess.SemLock", .basicsize = sizeof(SemLockObject), .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_IMMUTABLETYPE), .slots = _PyMp_SemLockType_slots, }; /* * Function to unlink semaphore names */ PyObject * _PyMp_sem_unlink(const char *name) { if (SEM_UNLINK(name) < 0) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); return NULL; } Py_RETURN_NONE; } #endif // HAVE_MP_SEMAPHORE uqfoundation-multiprocess-b3457a5/py3.12/README_MODS000066400000000000000000001455011455552142400217640ustar00rootroot00000000000000cp -rf py3.11/examples . cp -rf py3.11/doc . cp -f py3.11/index.html . 
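# (annotation, not one of the original build steps) The copies below pull in the
# py3.11 sources and the CPython Modules/_multiprocessing C sources; the diffs
# recorded after the "# ---" separators document the rename from the stdlib's
# "multiprocessing" names to this fork's "multiprocess" names. A hypothetical
# (GNU) sed invocation with a broadly similar effect on one file would be:
#   sed -i -e 's/_multiprocessing/_multiprocess/g' \
#          -e 's/"multiprocessing\.h"/"multiprocess.h"/g' semaphore.c
# The actual changes were recorded as the per-file diffs shown below.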
cp -rf py3.11/_multiprocess _multiprocess cp -rf py3.11/multiprocess multiprocess cp -rf Python-3.11.0/Modules/_multiprocessing Modules/_multiprocess # ---------------------------------------------------------------------- diff Python-3.12.0a1/Modules/_multiprocessing/semaphore.c Modules/_multiprocess/semaphore.c 10c10 < #include "multiprocessing.h" --- > #include "multiprocess.h" 35,36c35,36 < module _multiprocessing < class _multiprocessing.SemLock "SemLockObject *" "&_PyMp_SemLockType" --- > module _multiprocess > class _multiprocess.SemLock "SemLockObject *" "&_PyMp_SemLockType" 80c80 < _multiprocessing.SemLock.acquire --- > _multiprocess.SemLock.acquire 89c89 < _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, --- > _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, 171c171 < _multiprocessing.SemLock.release --- > _multiprocess.SemLock.release 177c177 < _multiprocessing_SemLock_release_impl(SemLockObject *self) --- > _multiprocess_SemLock_release_impl(SemLockObject *self) 232c232 < #ifndef HAVE_SEM_TIMEDWAIT --- > // ifndef HAVE_SEM_TIMEDWAIT 293c293 < #endif /* !HAVE_SEM_TIMEDWAIT */ --- > // #endif /* !HAVE_SEM_TIMEDWAIT */ 296c296 < _multiprocessing.SemLock.acquire --- > _multiprocess.SemLock.acquire 305c305 < _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, --- > _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, 381c381 < _multiprocessing.SemLock.release --- > _multiprocess.SemLock.release 387c387 < _multiprocessing_SemLock_release_impl(SemLockObject *self) --- > _multiprocess_SemLock_release_impl(SemLockObject *self) 471c471 < _multiprocessing.SemLock.__new__ --- > _multiprocess.SemLock.__new__ 482c482 < _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, --- > _multiprocess_SemLock_impl(PyTypeObject *type, int kind, int value, 530c530 < _multiprocessing.SemLock._rebuild --- > _multiprocess.SemLock._rebuild 541c541 < _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, --- > _multiprocess_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, 581c581 < _multiprocessing.SemLock._count --- > _multiprocess.SemLock._count 587c587 < _multiprocessing_SemLock__count_impl(SemLockObject *self) --- > _multiprocess_SemLock__count_impl(SemLockObject *self) 594c594 < _multiprocessing.SemLock._is_mine --- > _multiprocess.SemLock._is_mine 600c600 < _multiprocessing_SemLock__is_mine_impl(SemLockObject *self) --- > _multiprocess_SemLock__is_mine_impl(SemLockObject *self) 608c608 < _multiprocessing.SemLock._get_value --- > _multiprocess.SemLock._get_value 614c614 < _multiprocessing_SemLock__get_value_impl(SemLockObject *self) --- > _multiprocess_SemLock__get_value_impl(SemLockObject *self) 633c633 < _multiprocessing.SemLock._is_zero --- > _multiprocess.SemLock._is_zero 639c639 < _multiprocessing_SemLock__is_zero_impl(SemLockObject *self) --- > _multiprocess_SemLock__is_zero_impl(SemLockObject *self) 661c661 < _multiprocessing.SemLock._after_fork --- > _multiprocess.SemLock._after_fork 667c667 < _multiprocessing_SemLock__after_fork_impl(SemLockObject *self) --- > _multiprocess_SemLock__after_fork_impl(SemLockObject *self) 675c675 < _multiprocessing.SemLock.__enter__ --- > _multiprocess.SemLock.__enter__ 681c681 < _multiprocessing_SemLock___enter___impl(SemLockObject *self) --- > _multiprocess_SemLock___enter___impl(SemLockObject *self) 684c684 < return _multiprocessing_SemLock_acquire_impl(self, 1, Py_None); --- > return _multiprocess_SemLock_acquire_impl(self, 1, 
Py_None); 688c688 < _multiprocessing.SemLock.__exit__ --- > _multiprocess.SemLock.__exit__ 699c699 < _multiprocessing_SemLock___exit___impl(SemLockObject *self, --- > _multiprocess_SemLock___exit___impl(SemLockObject *self, 704c704 < return _multiprocessing_SemLock_release_impl(self); --- > return _multiprocess_SemLock_release_impl(self); 759c759 < {Py_tp_new, _multiprocessing_SemLock}, --- > {Py_tp_new, _multiprocess_SemLock}, 767c767 < .name = "_multiprocessing.SemLock", --- > .name = "_multiprocess.SemLock", diff Python-3.12.0a1/Modules/_multiprocessing/multiprocessing.c Modules/_multiprocess/multiprocess.c 2c2 < * Extension module used by multiprocessing package --- > * Extension module used by multiprocess package 4c4 < * multiprocessing.c --- > * multiprocess.c 10c10 < #include "multiprocessing.h" --- > #include "multiprocess.h" 29c29 < module _multiprocessing --- > module _multiprocess 76c76 < _multiprocessing.closesocket --- > _multiprocess.closesocket 84c84 < _multiprocessing_closesocket_impl(PyObject *module, HANDLE handle) --- > _multiprocess_closesocket_impl(PyObject *module, HANDLE handle) 99c99 < _multiprocessing.recv --- > _multiprocess.recv 108c108 < _multiprocessing_recv_impl(PyObject *module, HANDLE handle, int size) --- > _multiprocess_recv_impl(PyObject *module, HANDLE handle, int size) 131c131 < _multiprocessing.send --- > _multiprocess.send 140c140 < _multiprocessing_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf) --- > _multiprocess_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf) 159c159 < _multiprocessing.sem_unlink --- > _multiprocess.sem_unlink 167c167 < _multiprocessing_sem_unlink_impl(PyObject *module, const char *name) --- > _multiprocess_sem_unlink_impl(PyObject *module, const char *name) 195c195 < multiprocessing_exec(PyObject *module) --- > multiprocess_exec(PyObject *module) 277,278c277,278 < static PyModuleDef_Slot multiprocessing_slots[] = { < {Py_mod_exec, multiprocessing_exec}, --- > static PyModuleDef_Slot multiprocess_slots[] = { > {Py_mod_exec, multiprocess_exec}, 282c282 < static struct PyModuleDef multiprocessing_module = { --- > static struct PyModuleDef multiprocess_module = { 284c284 < .m_name = "_multiprocessing", --- > .m_name = "_multiprocess", 287c287 < .m_slots = multiprocessing_slots, --- > .m_slots = multiprocess_slots, 291c291 < PyInit__multiprocessing(void) --- > PyInit__multiprocess(void) 293c293 < return PyModuleDef_Init(&multiprocessing_module); --- > return PyModuleDef_Init(&multiprocess_module); # ---------------------------------------------------------------------- diff Python-3.11.0/Lib/multiprocessing/managers.py Python-3.12.0a1/Lib/multiprocessing/managers.py 436d435 < obj, exposed, gettypeid = self.id_to_obj[ident] diff Python-3.11.0/Lib/multiprocessing/resource_tracker.py Python-3.12.0a1/Lib/multiprocessing/resource_tracker.py 164c164 < if len(name) > 512: --- > if len(msg) > 512: 167c167 < raise ValueError('name too long') --- > raise ValueError('msg too long') # ---------------------------------------------------------------------- diff Python-3.11.0/Lib/test/_test_multiprocessing.py Python-3.12.0a1/Lib/test/_test_multiprocessing.py 126a127,128 > WAIT_ACTIVE_CHILDREN_TIMEOUT = 5.0 > 4321,4325c4323,4325 < deadline = time.monotonic() + support.LONG_TIMEOUT < t = 0.1 < while time.monotonic() < deadline: < time.sleep(t) < t = min(t*2, 5) --- > err_msg = ("A SharedMemory segment was leaked after " > "a process was abruptly terminated") > for _ in support.sleeping_retry(support.LONG_TIMEOUT, err_msg): 
4330,4332d4329 < else: < raise AssertionError("A SharedMemory segment was leaked after" < " a process was abruptly terminated.") 5295c5292 < import time, os, tempfile --- > import time, os 5301d5297 < rand = tempfile._RandomNameSequence() 5342,5344c5338,5341 < deadline = time.monotonic() + support.LONG_TIMEOUT < while time.monotonic() < deadline: < time.sleep(.5) --- > err_msg = (f"A {rtype} resource was leaked after a process was " > f"abruptly terminated") > for _ in support.sleeping_retry(support.SHORT_TIMEOUT, > err_msg): 5352,5355c5349 < else: < raise AssertionError( < f"A {rtype} resource was leaked after a process was " < f"abruptly terminated.") --- > 5440a5435,5442 > def test_too_long_name_resource(self): > # gh-96819: Resource names that will make the length of a write to a pipe > # greater than PIPE_BUF are not allowed > rtype = "shared_memory" > too_long_name_resource = "a" * (512 - len(rtype)) > with self.assertRaises(ValueError): > resource_tracker.register(too_long_name_resource, rtype) > 5582a5585,5586 > > timeout = WAIT_ACTIVE_CHILDREN_TIMEOUT 5584,5593c5588,5589 < t = 0.01 < while len(multiprocessing.active_children()) > 1: < time.sleep(t) < t *= 2 < dt = time.monotonic() - start_time < if dt >= 5.0: < test.support.environment_altered = True < support.print_warning(f"multiprocessing.Manager still has " < f"{multiprocessing.active_children()} " < f"active children after {dt} seconds") --- > for _ in support.sleeping_retry(timeout, error=False): > if len(multiprocessing.active_children()) <= 1: 5594a5591,5596 > else: > dt = time.monotonic() - start_time > support.environment_altered = True > support.print_warning(f"multiprocessing.Manager still has " > f"{multiprocessing.active_children()} " > f"active children after {dt:.1f} seconds") 5699,5701c5701,5704 < assert obj[0] == 5 < assert obj.count(5) == 1 < assert obj.index(5) == 0 --- > case = unittest.TestCase() > case.assertEqual(obj[0], 5) > case.assertEqual(obj.count(5), 1) > case.assertEqual(obj.index(5), 0) 5706,5707c5709,5710 < assert len(obj) == 1 < assert obj.pop(0) == 5 --- > case.assertEqual(len(obj), 1) > case.assertEqual(obj.pop(0), 5) 5713c5716 < assert not o --- > self.assertIsNotNone(o) 5718,5725c5721,5729 < assert len(obj) == 1 < assert obj['foo'] == 5 < assert obj.get('foo') == 5 < assert list(obj.items()) == [('foo', 5)] < assert list(obj.keys()) == ['foo'] < assert list(obj.values()) == [5] < assert obj.copy() == {'foo': 5} < assert obj.popitem() == ('foo', 5) --- > case = unittest.TestCase() > case.assertEqual(len(obj), 1) > case.assertEqual(obj['foo'], 5) > case.assertEqual(obj.get('foo'), 5) > case.assertListEqual(list(obj.items()), [('foo', 5)]) > case.assertListEqual(list(obj.keys()), ['foo']) > case.assertListEqual(list(obj.values()), [5]) > case.assertDictEqual(obj.copy(), {'foo': 5}) > case.assertTupleEqual(obj.popitem(), ('foo', 5)) 5731c5735 < assert not o --- > self.assertIsNotNone(o) 5736,5737c5740,5742 < assert obj.value == 1 < assert obj.get() == 1 --- > case = unittest.TestCase() > case.assertEqual(obj.value, 1) > case.assertEqual(obj.get(), 1) 5748,5751c5753,5757 < assert obj[0] == 0 < assert obj[1] == 1 < assert len(obj) == 2 < assert list(obj) == [0, 1] --- > case = unittest.TestCase() > case.assertEqual(obj[0], 0) > case.assertEqual(obj[1], 1) > case.assertEqual(len(obj), 2) > case.assertListEqual(list(obj), [0, 1]) 5759,5760c5765,5767 < assert obj.x == 0 < assert obj.y == 1 --- > case = unittest.TestCase() > case.assertEqual(obj.x, 0) > case.assertEqual(obj.y, 1) 5890a5898 > timeout = 
WAIT_ACTIVE_CHILDREN_TIMEOUT 5892,5901c5900,5901 < t = 0.01 < while len(multiprocessing.active_children()) > 1: < time.sleep(t) < t *= 2 < dt = time.monotonic() - start_time < if dt >= 5.0: < test.support.environment_altered = True < support.print_warning(f"multiprocessing.Manager still has " < f"{multiprocessing.active_children()} " < f"active children after {dt} seconds") --- > for _ in support.sleeping_retry(timeout, error=False): > if len(multiprocessing.active_children()) <= 1: 5902a5903,5908 > else: > dt = time.monotonic() - start_time > support.environment_altered = True > support.print_warning(f"multiprocessing.Manager still has " > f"{multiprocessing.active_children()} " > f"active children after {dt:.1f} seconds") # ---------------------------------------------------------------------- diff Python-3.12.0a2/Modules/_multiprocessing/semaphore.c Python-3.12.0a3/Modules/_multiprocessing/semaphore.c 82c82 < block as blocking: bool(accept={int}) = True --- > block as blocking: bool = True 91c91 < /*[clinic end generated code: output=f9998f0b6b0b0872 input=86f05662cf753eb4]*/ --- > /*[clinic end generated code: output=f9998f0b6b0b0872 input=e5b45f5cbb775166]*/ 298c298 < block as blocking: bool(accept={int}) = True --- > block as blocking: bool = True 307c307 < /*[clinic end generated code: output=f9998f0b6b0b0872 input=86f05662cf753eb4]*/ --- > /*[clinic end generated code: output=f9998f0b6b0b0872 input=e5b45f5cbb775166]*/ 477c477 < unlink: bool(accept={int}) --- > unlink: bool 484c484 < /*[clinic end generated code: output=30727e38f5f7577a input=b378c3ee27d3a0fa]*/ --- > /*[clinic end generated code: output=30727e38f5f7577a input=fdaeb69814471c5b]*/ diff Python-3.12.0a2/Modules/_multiprocessing/clinic/semaphore.c.h Python-3.12.0a3/Modules/_multiprocessing/clinic/semaphore.c.h 68,69c68,69 < blocking = _PyLong_AsInt(args[0]); < if (blocking == -1 && PyErr_Occurred()) { --- > blocking = PyObject_IsTrue(args[0]); > if (blocking < 0) { 165,166c165,166 < blocking = _PyLong_AsInt(args[0]); < if (blocking == -1 && PyErr_Occurred()) { --- > blocking = PyObject_IsTrue(args[0]); > if (blocking < 0) { 278,279c278,279 < unlink = _PyLong_AsInt(fastargs[4]); < if (unlink == -1 && PyErr_Occurred()) { --- > unlink = PyObject_IsTrue(fastargs[4]); > if (unlink < 0) { 545c545 < /*[clinic end generated code: output=720d7d0066dc0954 input=a9049054013a1b77]*/ --- > /*[clinic end generated code: output=dae57a702cc01512 input=a9049054013a1b77]*/ diff Python-3.12.0a2/Lib/multiprocessing/connection.py Python-3.12.0a3/Lib/multiprocessing/connection.py 730a731,798 > # multiprocessing.connection Authentication Handshake Protocol Description > # (as documented for reference after reading the existing code) > # ============================================================================= > # > # On Windows: native pipes with "overlapped IO" are used to send the bytes, > # instead of the length prefix SIZE scheme described below. (ie: the OS deals > # with message sizes for us) > # > # Protocol error behaviors: > # > # On POSIX, any failure to receive the length prefix into SIZE, for SIZE greater > # than the requested maxsize to receive, or receiving fewer than SIZE bytes > # results in the connection being closed and auth to fail. > # > # On Windows, receiving too few bytes is never a low level _recv_bytes read > # error, receiving too many will trigger an error only if receive maxsize > # value was larger than 128 OR the if the data arrived in smaller pieces. 
> # > # Serving side Client side > # ------------------------------ --------------------------------------- > # 0. Open a connection on the pipe. > # 1. Accept connection. > # 2. New random 20 bytes -> MESSAGE > # 3. send 4 byte length (net order) > # prefix followed by: > # b'#CHALLENGE#' + MESSAGE > # 4. Receive 4 bytes, parse as network byte > # order integer. If it is -1, receive an > # additional 8 bytes, parse that as network > # byte order. The result is the length of > # the data that follows -> SIZE. > # 5. Receive min(SIZE, 256) bytes -> M1 > # 6. Assert that M1 starts with: > # b'#CHALLENGE#' > # 7. Strip that prefix from M1 into -> M2 > # 8. Compute HMAC-MD5 of AUTHKEY, M2 -> C_DIGEST > # 9. Send 4 byte length prefix (net order) > # followed by C_DIGEST bytes. > # 10. Compute HMAC-MD5 of AUTHKEY, > # MESSAGE into -> M_DIGEST. > # 11. Receive 4 or 4+8 byte length > # prefix (#4 dance) -> SIZE. > # 12. Receive min(SIZE, 256) -> C_D. > # 13. Compare M_DIGEST == C_D: > # 14a: Match? Send length prefix & > # b'#WELCOME#' > # <- RETURN > # 14b: Mismatch? Send len prefix & > # b'#FAILURE#' > # <- CLOSE & AuthenticationError > # 15. Receive 4 or 4+8 byte length prefix (net > # order) again as in #4 into -> SIZE. > # 16. Receive min(SIZE, 256) bytes -> M3. > # 17. Compare M3 == b'#WELCOME#': > # 17a. Match? <- RETURN > # 17b. Mismatch? <- CLOSE & AuthenticationError > # > # If this RETURNed, the connection remains open: it has been authenticated. > # > # Length prefixes are used consistently even though every step so far has > # always been a singular specific fixed length. This may help us evolve > # the protocol in the future without breaking backwards compatibility. > # > # Similarly the initial challenge message from the serving side has always > # been 20 bytes, but clients can accept a 100+ so using the length of the > # opening challenge message as an indicator of protocol version may work. 
> > diff Python-3.12.0a2/Lib/multiprocessing/pool.py Python-3.12.0a3/Lib/multiprocessing/pool.py 699c699 < "Cannot have cache with result_hander not alive") --- > "Cannot have cache with result_handler not alive") diff Python-3.12.0a2/Lib/multiprocessing/shared_memory.py Python-3.12.0a3/Lib/multiprocessing/shared_memory.py 176c176,179 < size = _winapi.VirtualQuerySize(p_buf) --- > try: > size = _winapi.VirtualQuerySize(p_buf) > finally: > _winapi.UnmapViewOfFile(p_buf) diff Python-3.12.0a2/Lib/test/_test_multiprocessing.py Python-3.12.0a3/Lib/test/_test_multiprocessing.py 6045c6045 < s = SemLock(1, 0, 10, name, 0) --- > s = SemLock(1, 0, 10, name, False) # ---------------------------------------------------------------------- diff Python-3.12.0a3/Lib/multiprocessing/queues.py Python-3.12.0a4/Lib/multiprocessing/queues.py 282a283,284 > __class_getitem__ = classmethod(types.GenericAlias) > # ---------------------------------------------------------------------- diff Python-3.12.0a4/Lib/multiprocessing/context.py Python-3.12.0a5/Lib/multiprocessing/context.py 260a261 > """Returns a list of the supported start methods, default first.""" # ---------------------------------------------------------------------- diff Python-3.12.0a4/Lib/test/_test_multiprocessing.py Python-3.12.0a5/Lib/test/_test_multiprocessing.py 4970c4970 < def run_in_child(cls): --- > def run_in_child(cls, start_method): 4972,4974c4972,4976 < r, w = multiprocessing.Pipe(duplex=False) < p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) < p.start() --- > mp = multiprocessing.get_context(start_method) > r, w = mp.Pipe(duplex=False) > p = mp.Process(target=cls.run_in_grandchild, args=(w,)) > with warnings.catch_warnings(category=DeprecationWarning): > p.start() 4985,4986c4987,4990 < prog = ('from test._test_multiprocessing import TestFlags; ' + < 'TestFlags.run_in_child()') --- > prog = ( > 'from test._test_multiprocessing import TestFlags; ' > f'TestFlags.run_in_child({multiprocessing.get_start_method()!r})' > ) # ---------------------------------------------------------------------- diff Python-3.12.0a5/Modules/_multiprocessing/multiprocessing.h Python-3.12.0a7/Modules/_multiprocessing/multiprocessing.h 15c15,17 < # define WIN32_LEAN_AND_MEAN --- > # ifndef WIN32_LEAN_AND_MEAN > # define WIN32_LEAN_AND_MEAN > # endif # ---------------------------------------------------------------------- diff Python-3.12.0a7/Modules/_multiprocessing/multiprocessing.c Python-3.12.0b1/Modules/_multiprocessing/multiprocessing.c 278a279 > {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, diff Python-3.12.0a7/Modules/_multiprocessing/posixshmem.c Python-3.12.0b1/Modules/_multiprocessing/posixshmem.c 112a113,118 > static PyModuleDef_Slot module_slots[] = { > {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, > {0, NULL} > }; > > 118a125 > .m_slots = module_slots, diff Python-3.12.0a7/Lib/multiprocessing/connection.py Python-3.12.0b1/Lib/multiprocessing/connection.py 725c725 < MESSAGE_LENGTH = 20 --- > MESSAGE_LENGTH = 40 # MUST be > 20 727,729c727,729 < CHALLENGE = b'#CHALLENGE#' < WELCOME = b'#WELCOME#' < FAILURE = b'#FAILURE#' --- > _CHALLENGE = b'#CHALLENGE#' > _WELCOME = b'#WELCOME#' > _FAILURE = b'#FAILURE#' 753c753,758 < # 2. New random 20 bytes -> MESSAGE --- > # 2. Random 20+ bytes -> MESSAGE > # Modern servers always send > # more than 20 bytes and include > # a {digest} prefix on it with > # their preferred HMAC digest. > # Legacy ones send ==20 bytes. 766c771,778 < # 8. 
Compute HMAC-MD5 of AUTHKEY, M2 -> C_DIGEST --- > # 7.1. Parse M2: if it is exactly 20 bytes in > # length this indicates a legacy server > # supporting only HMAC-MD5. Otherwise the > # 7.2. preferred digest is looked up from an > # expected "{digest}" prefix on M2. No prefix > # or unsupported digest? <- AuthenticationError > # 7.3. Put divined algorithm name in -> D_NAME > # 8. Compute HMAC-D_NAME of AUTHKEY, M2 -> C_DIGEST 769,771c781 < # 10. Compute HMAC-MD5 of AUTHKEY, < # MESSAGE into -> M_DIGEST. < # 11. Receive 4 or 4+8 byte length --- > # 10. Receive 4 or 4+8 byte length 773c783,796 < # 12. Receive min(SIZE, 256) -> C_D. --- > # 11. Receive min(SIZE, 256) -> C_D. > # 11.1. Parse C_D: legacy servers > # accept it as is, "md5" -> D_NAME > # 11.2. modern servers check the length > # of C_D, IF it is 16 bytes? > # 11.2.1. "md5" -> D_NAME > # and skip to step 12. > # 11.3. longer? expect and parse a "{digest}" > # prefix into -> D_NAME. > # Strip the prefix and store remaining > # bytes in -> C_D. > # 11.4. Don't like D_NAME? <- AuthenticationError > # 12. Compute HMAC-D_NAME of AUTHKEY, > # MESSAGE into -> M_DIGEST. 790,796c813,863 < # Length prefixes are used consistently even though every step so far has < # always been a singular specific fixed length. This may help us evolve < # the protocol in the future without breaking backwards compatibility. < # < # Similarly the initial challenge message from the serving side has always < # been 20 bytes, but clients can accept a 100+ so using the length of the < # opening challenge message as an indicator of protocol version may work. --- > # Length prefixes are used consistently. Even on the legacy protocol, this > # was good fortune and allowed us to evolve the protocol by using the length > # of the opening challenge or length of the returned digest as a signal as > # to which protocol the other end supports. > > _ALLOWED_DIGESTS = frozenset( > {b'md5', b'sha256', b'sha384', b'sha3_256', b'sha3_384'}) > _MAX_DIGEST_LEN = max(len(_) for _ in _ALLOWED_DIGESTS) > > # Old hmac-md5 only server versions from Python <=3.11 sent a message of this > # length. It happens to not match the length of any supported digest so we can > # use a message of this length to indicate that we should work in backwards > # compatible md5-only mode without a {digest_name} prefix on our response. > _MD5ONLY_MESSAGE_LENGTH = 20 > _MD5_DIGEST_LEN = 16 > _LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN) > > > def _get_digest_name_and_payload(message: bytes) -> (str, bytes): > """Returns a digest name and the payload for a response hash. > > If a legacy protocol is detected based on the message length > or contents the digest name returned will be empty to indicate > legacy mode where MD5 and no digest prefix should be sent. > """ > # modern message format: b"{digest}payload" longer than 20 bytes > # legacy message format: 16 or 20 byte b"payload" > if len(message) in _LEGACY_LENGTHS: > # Either this was a legacy server challenge, or we're processing > # a reply from a legacy client that sent an unprefixed 16-byte > # HMAC-MD5 response. All messages using the modern protocol will > # be longer than either of these lengths. 
> return '', message > if (message.startswith(b'{') and > (curly := message.find(b'}', 1, _MAX_DIGEST_LEN+2)) > 0): > digest = message[1:curly] > if digest in _ALLOWED_DIGESTS: > payload = message[curly+1:] > return digest.decode('ascii'), payload > raise AuthenticationError( > 'unsupported message length, missing digest prefix, ' > f'or unsupported digest: {message=}') > > > def _create_response(authkey, message): > """Create a MAC based on authkey and message > > The MAC algorithm defaults to HMAC-MD5, unless MD5 is not available or > the message has a '{digest_name}' prefix. For legacy HMAC-MD5, the response > is the raw MAC, otherwise the response is prefixed with '{digest_name}', > e.g. b'{sha256}abcdefg...' 798,799c865,894 < < def deliver_challenge(connection, authkey): --- > Note: The MAC protects the entire message including the digest_name prefix. > """ > import hmac > digest_name = _get_digest_name_and_payload(message)[0] > # The MAC protects the entire message: digest header and payload. > if not digest_name: > # Legacy server without a {digest} prefix on message. > # Generate a legacy non-prefixed HMAC-MD5 reply. > try: > return hmac.new(authkey, message, 'md5').digest() > except ValueError: > # HMAC-MD5 is not available (FIPS mode?), fall back to > # HMAC-SHA2-256 modern protocol. The legacy server probably > # doesn't support it and will reject us anyways. :shrug: > digest_name = 'sha256' > # Modern protocol, indicate the digest used in the reply. > response = hmac.new(authkey, message, digest_name).digest() > return b'{%s}%s' % (digest_name.encode('ascii'), response) > > > def _verify_challenge(authkey, message, response): > """Verify MAC challenge > > If our message did not include a digest_name prefix, the client is allowed > to select a stronger digest_name from _ALLOWED_DIGESTS. > > In case our message is prefixed, a client cannot downgrade to a weaker > algorithm, because the MAC is calculated over the entire message > including the '{digest_name}' prefix. > """ 800a896,910 > response_digest, response_mac = _get_digest_name_and_payload(response) > response_digest = response_digest or 'md5' > try: > expected = hmac.new(authkey, message, response_digest).digest() > except ValueError: > raise AuthenticationError(f'{response_digest=} unsupported') > if len(expected) != len(response_mac): > raise AuthenticationError( > f'expected {response_digest!r} of length {len(expected)} ' > f'got {len(response_mac)}') > if not hmac.compare_digest(expected, response_mac): > raise AuthenticationError('digest received was wrong') > > > def deliver_challenge(connection, authkey: bytes, digest_name='sha256'): 803a914 > assert MESSAGE_LENGTH > _MD5ONLY_MESSAGE_LENGTH, "protocol constraint" 805,806c916,920 < connection.send_bytes(CHALLENGE + message) < digest = hmac.new(authkey, message, 'md5').digest() --- > message = b'{%s}%s' % (digest_name.encode('ascii'), message) > # Even when sending a challenge to a legacy client that does not support > # digest prefixes, they'll take the entire thing as a challenge and > # respond to it with a raw HMAC-MD5. 
> connection.send_bytes(_CHALLENGE + message) 808,809c922,926 < if response == digest: < connection.send_bytes(WELCOME) --- > try: > _verify_challenge(authkey, message, response) > except AuthenticationError: > connection.send_bytes(_FAILURE) > raise 811,812c928 < connection.send_bytes(FAILURE) < raise AuthenticationError('digest received was wrong') --- > connection.send_bytes(_WELCOME) 814,815c930,931 < def answer_challenge(connection, authkey): < import hmac --- > > def answer_challenge(connection, authkey: bytes): 820,822c936,942 < assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message < message = message[len(CHALLENGE):] < digest = hmac.new(authkey, message, 'md5').digest() --- > if not message.startswith(_CHALLENGE): > raise AuthenticationError( > f'Protocol error, expected challenge: {message=}') > message = message[len(_CHALLENGE):] > if len(message) < _MD5ONLY_MESSAGE_LENGTH: > raise AuthenticationError('challenge too short: {len(message)} bytes') > digest = _create_response(authkey, message) 825c945 < if response != WELCOME: --- > if response != _WELCOME: diff Python-3.12.0a7/Lib/multiprocessing/process.py Python-3.12.0b1/Lib/multiprocessing/process.py 64c64 < if p._popen.poll() is not None: --- > if (child_popen := p._popen) and child_popen.poll() is not None: diff Python-3.12.0a7/Lib/test/_test_multiprocessing.py Python-3.12.0b1/Lib/test/_test_multiprocessing.py 50a51 > from multiprocessing.connection import wait, AuthenticationError 134,135d134 < from multiprocessing.connection import wait < 3045c3044 < @hashlib_helper.requires_hashdigest('md5') --- > @hashlib_helper.requires_hashdigest('sha256') 3534c3533 < @hashlib_helper.requires_hashdigest('md5') --- > @hashlib_helper.requires_hashdigest('sha256') 3837c3836 < @hashlib_helper.requires_hashdigest('md5') --- > @hashlib_helper.requires_hashdigest('sha256') 4639c4638 < @hashlib_helper.requires_hashdigest('md5') --- > @hashlib_helper.requires_hashdigest('sha256') 4659c4658 < return multiprocessing.connection.CHALLENGE --- > return multiprocessing.connection._CHALLENGE 4668a4668,4705 > > @hashlib_helper.requires_hashdigest('md5') > @hashlib_helper.requires_hashdigest('sha256') > class ChallengeResponseTest(unittest.TestCase): > authkey = b'supadupasecretkey' > > def create_response(self, message): > return multiprocessing.connection._create_response( > self.authkey, message > ) > > def verify_challenge(self, message, response): > return multiprocessing.connection._verify_challenge( > self.authkey, message, response > ) > > def test_challengeresponse(self): > for algo in [None, "md5", "sha256"]: > with self.subTest(f"{algo=}"): > msg = b'is-twenty-bytes-long' # The length of a legacy message. > if algo: > prefix = b'{%s}' % algo.encode("ascii") > else: > prefix = b'' > msg = prefix + msg > response = self.create_response(msg) > if not response.startswith(prefix): > self.fail(response) > self.verify_challenge(msg, response) > > # TODO(gpshead): We need integration tests for handshakes between modern > # deliver_challenge() and verify_response() code and connections running a > # test-local copy of the legacy Python <=3.11 implementations. > > # TODO(gpshead): properly annotate tests for requires_hashdigest rather than > # only running these on a platform supporting everything. otherwise logic > # issues preventing it from working on FIPS mode setups will be hidden. 
> 4676c4713 < @hashlib_helper.requires_hashdigest('md5') --- > @hashlib_helper.requires_hashdigest('sha256') 5540c5577 < @hashlib_helper.requires_hashdigest('md5') --- > @hashlib_helper.requires_hashdigest('sha256') 5972c6009 < Temp = hashlib_helper.requires_hashdigest('md5')(Temp) --- > Temp = hashlib_helper.requires_hashdigest('sha256')(Temp) # ---------------------------------------------------------------------- diff Python-3.12.0b1/Lib/multiprocessing/spawn.py Python-3.12.0b4/Lib/multiprocessing/spawn.py 34c34 < WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") --- > WINSERVICE = sys.executable and sys.executable.lower().endswith("pythonservice.exe") 38c38,40 < if sys.platform == 'win32': --- > if exe is None: > _python_exe = exe > elif sys.platform == 'win32': diff Python-3.12.0b1/Lib/test/_test_multiprocessing.py Python-3.12.0b4/Lib/test/_test_multiprocessing.py 15a16 > import functools 33a35 > from test.support import script_helper 173a176,228 > def only_run_in_spawn_testsuite(reason): > """Returns a decorator: raises SkipTest when SM != spawn at test time. > > This can be useful to save overall Python test suite execution time. > "spawn" is the universal mode available on all platforms so this limits the > decorated test to only execute within test_multiprocessing_spawn. > > This would not be necessary if we refactored our test suite to split things > into other test files when they are not start method specific to be rerun > under all start methods. > """ > > def decorator(test_item): > > @functools.wraps(test_item) > def spawn_check_wrapper(*args, **kwargs): > if (start_method := multiprocessing.get_start_method()) != "spawn": > raise unittest.SkipTest(f"{start_method=}, not 'spawn'; {reason}") > return test_item(*args, **kwargs) > > return spawn_check_wrapper > > return decorator > > > class TestInternalDecorators(unittest.TestCase): > """Logic within a test suite that could errantly skip tests? 
Test it!""" > > @unittest.skipIf(sys.platform == "win32", "test requires that fork exists.") > def test_only_run_in_spawn_testsuite(self): > if multiprocessing.get_start_method() != "spawn": > raise unittest.SkipTest("only run in test_multiprocessing_spawn.") > > try: > @only_run_in_spawn_testsuite("testing this decorator") > def return_four_if_spawn(): > return 4 > except Exception as err: > self.fail(f"expected decorated `def` not to raise; caught {err}") > > orig_start_method = multiprocessing.get_start_method(allow_none=True) > try: > multiprocessing.set_start_method("spawn", force=True) > self.assertEqual(return_four_if_spawn(), 4) > multiprocessing.set_start_method("fork", force=True) > with self.assertRaises(unittest.SkipTest) as ctx: > return_four_if_spawn() > self.assertIn("testing this decorator", str(ctx.exception)) > self.assertIn("start_method=", str(ctx.exception)) > finally: > multiprocessing.set_start_method(orig_start_method, force=True) > > 5817a5873 > @only_run_in_spawn_testsuite("spawn specific test.") 5828d5883 < 5830d5884 < 5832d5885 < 5834d5886 < 5840c5892 < rc, out, err = test.support.script_helper.assert_python_ok(testfn) --- > rc, out, err = script_helper.assert_python_ok(testfn) 5843c5895 < self.assertEqual(err, b'') --- > self.assertFalse(err, msg=err.decode('utf-8')) 5851a5904,5921 > @only_run_in_spawn_testsuite("avoids redundant testing.") > def test_spawn_sys_executable_none_allows_import(self): > # Regression test for a bug introduced in > # https://github.com/python/cpython/issues/90876 that caused an > # ImportError in multiprocessing when sys.executable was None. > # This can be true in embedded environments. > rc, out, err = script_helper.assert_python_ok( > "-c", > """if 1: > import sys > sys.executable = None > assert "multiprocessing" not in sys.modules, "already imported!" > import multiprocessing > import multiprocessing.spawn # This should not fail\n""", > ) > self.assertEqual(rc, 0) > self.assertFalse(err, msg=err.decode('utf-8')) > # ---------------------------------------------------------------------- diff Python-3.12.0b4/Modules/_multiprocessing/semaphore.c Python-3.12.0rc2/Modules/_multiprocessing/semaphore.c 519,521d518 < if (handle != SEM_FAILED) < SEM_CLOSE(handle); < PyMem_Free(name_copy); 524a522,524 > if (handle != SEM_FAILED) > SEM_CLOSE(handle); > PyMem_Free(name_copy); 558a559 > PyErr_SetFromErrno(PyExc_OSError); 560c561 < return PyErr_SetFromErrno(PyExc_OSError); --- > return NULL; diff Python-3.12.0b4/Lib/multiprocessing/forkserver.py Python-3.12.0rc2/Lib/multiprocessing/forkserver.py 64c64 < if not all(type(mod) is str for mod in self._preload_modules): --- > if not all(type(mod) is str for mod in modules_names): diff Python-3.12.0b4/Lib/multiprocessing/spawn.py Python-3.12.0rc2/Lib/multiprocessing/spawn.py 153c153,157 < is not going to be frozen to produce an executable.''') --- > is not going to be frozen to produce an executable. 
> > To fix this issue, refer to the "Safe importing of main module" > section in https://docs.python.org/3/library/multiprocessing.html > ''') diff Python-3.12.0b4/Lib/multiprocessing/synchronize.py Python-3.12.0rc2/Lib/multiprocessing/synchronize.py 53,54c53,54 < name = ctx.get_start_method() < unlink_now = sys.platform == 'win32' or name == 'fork' --- > self._is_fork_ctx = ctx.get_start_method() == 'fork' > unlink_now = sys.platform == 'win32' or self._is_fork_ctx 105a106,110 > if self._is_fork_ctx: > raise RuntimeError('A SemLock created in a fork context is being ' > 'shared with a process in a spawn context. This is ' > 'not supported. Please use the same context to create ' > 'multiprocessing objects and Process.') 112a118,119 > # Ensure that deserialized SemLock can be serialized again (gh-108520). > self._is_fork_ctx = False diff Python-3.12.0b4/Lib/test/_test_multiprocessing.py Python-3.12.0rc2/Lib/test/_test_multiprocessing.py 331a332 > @support.requires_resource('cpu') 4468a4470 > @support.requires_resource('cpu') 5333a5336,5343 > def test_context_check_module_types(self): > try: > ctx = multiprocessing.get_context('forkserver') > except ValueError: > raise unittest.SkipTest('forkserver should be available') > with self.assertRaisesRegex(TypeError, 'module_names must be a list of strings'): > ctx.set_forkserver_preload([1, 2, 3]) > 5377a5388,5435 > @unittest.skipIf(sys.platform == "win32", > "Only Spawn on windows so no risk of mixing") > @only_run_in_spawn_testsuite("avoids redundant testing.") > def test_mixed_startmethod(self): > # Fork-based locks cannot be used with spawned process > for process_method in ["spawn", "forkserver"]: > queue = multiprocessing.get_context("fork").Queue() > process_ctx = multiprocessing.get_context(process_method) > p = process_ctx.Process(target=close_queue, args=(queue,)) > err_msg = "A SemLock created in a fork" > with self.assertRaisesRegex(RuntimeError, err_msg): > p.start() > > # non-fork-based locks can be used with all other start methods > for queue_method in ["spawn", "forkserver"]: > for process_method in multiprocessing.get_all_start_methods(): > queue = multiprocessing.get_context(queue_method).Queue() > process_ctx = multiprocessing.get_context(process_method) > p = process_ctx.Process(target=close_queue, args=(queue,)) > p.start() > p.join() > > @classmethod > def _put_one_in_queue(cls, queue): > queue.put(1) > > @classmethod > def _put_two_and_nest_once(cls, queue): > queue.put(2) > process = multiprocessing.Process(target=cls._put_one_in_queue, args=(queue,)) > process.start() > process.join() > > def test_nested_startmethod(self): > # gh-108520: Regression test to ensure that child process can send its > # arguments to another process > queue = multiprocessing.Queue() > > process = multiprocessing.Process(target=self._put_two_and_nest_once, args=(queue,)) > process.start() > process.join() > > results = [] > while not queue.empty(): > results.append(queue.get()) > > self.assertEqual(results, [2, 1]) > 6061c6119,6120 < def install_tests_in_module_dict(remote_globs, start_method): --- > def install_tests_in_module_dict(remote_globs, start_method, > only_type=None, exclude_types=False): 6073a6133,6136 > if only_type and type_ != only_type: > continue > if exclude_types: > continue 6083a6147,6149 > if only_type: > continue > # ---------------------------------------------------------------------- $ diff Python-3.12.0rc2/Lib/test/_test_multiprocessing.py Python-3.12.0rc3/Lib/test/_test_multiprocessing.py 677a678 > 
@support.requires_resource('walltime') 4955a4957 > @support.requires_resource('walltime') 4983a4986 > @support.requires_resource('walltime') # ---------------------------------------------------------------------- diff Python-3.12.0rc3/Lib/multiprocessing/connection.py Python-3.12.1/Lib/multiprocessing/connection.py 11a12 > import errno 273a275 > _send_ov = None 275a278,281 > ov = self._send_ov > if ov is not None: > # Interrupt WaitForMultipleObjects() in _send_bytes() > ov.cancel() 278a285,288 > if self._send_ov is not None: > # A connection should only be used by a single thread > raise ValueError("concurrent send_bytes() calls " > "are not supported") 279a290 > self._send_ov = ov 288a300 > self._send_ov = None 289a302,306 > if err == _winapi.ERROR_OPERATION_ABORTED: > # close() was called by another thread while > # WaitForMultipleObjects() was waiting for the overlapped > # operation. > raise OSError(errno.EPIPE, "handle is closed") diff Python-3.12.0rc3/Lib/multiprocessing/popen_spawn_win32.py Python-3.12.1/Lib/multiprocessing/popen_spawn_win32.py 16a17 > # Exit code used by Popen.terminate() 125,126c126,130 < except OSError: < if self.wait(timeout=1.0) is None: --- > except PermissionError: > # ERROR_ACCESS_DENIED (winerror 5) is received when the > # process already died. > code = _winapi.GetExitCodeProcess(int(self._handle)) > if code == _winapi.STILL_ACTIVE: 127a132,134 > self.returncode = code > else: > self.returncode = -signal.SIGTERM diff Python-3.12.0rc3/Lib/multiprocessing/queues.py Python-3.12.1/Lib/multiprocessing/queues.py 160a161,169 > def _terminate_broken(self): > # Close a Queue on error. > > # gh-94777: Prevent queue writing to a pipe which is no longer read. > self._reader.close() > > self.close() > self.join_thread() > 172c181,182 < name='QueueFeederThread' --- > name='QueueFeederThread', > daemon=True, 174d183 < self._thread.daemon = True 176,178c185,193 < debug('doing self._thread.start()') < self._thread.start() < debug('... done self._thread.start()') --- > try: > debug('doing self._thread.start()') > self._thread.start() > debug('... done self._thread.start()') > except: > # gh-109047: During Python finalization, creating a thread > # can fail with RuntimeError. > self._thread = None > raise diff Python-3.12.0rc3/Lib/multiprocessing/resource_tracker.py Python-3.12.1/Lib/multiprocessing/resource_tracker.py 53a54,57 > class ReentrantCallError(RuntimeError): > pass > > 57c61 < self._lock = threading.Lock() --- > self._lock = threading.RLock() 60a65,72 > def _reentrant_call_error(self): > # gh-109629: this happens if an explicit call to the ResourceTracker > # gets interrupted by a garbage collection, invoking a finalizer (*) > # that itself calls back into ResourceTracker. > # (*) for example the SemLock finalizer > raise ReentrantCallError( > "Reentrant call into the multiprocessing resource tracker") > 62a75,78 > # This should not happen (_stop() isn't called by a finalizer) > # but we check for it anyway. > if self._lock._recursion_count() > 1: > return self._reentrant_call_error() 83a100,102 > if self._lock._recursion_count() > 1: > # The code below is certainly not reentrant-safe, so bail out > return self._reentrant_call_error() 162c181,191 < self.ensure_running() --- > try: > self.ensure_running() > except ReentrantCallError: > # The code below might or might not work, depending on whether > # the resource tracker was already running and still alive. > # Better warn the user. > # (XXX is warnings.warn itself reentrant-safe? 
:-) > warnings.warn( > f"ResourceTracker called reentrantly for resource cleanup, " > f"which is unsupported. " > f"The {rtype} object {name!r} might leak.") 178a208 > diff Python-3.12.0rc3/Lib/test/_test_multiprocessing.py Python-3.12.1/Lib/test/_test_multiprocessing.py 81,82c81,82 < if support.check_sanitizer(address=True): < # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to --- > if support.HAVE_ASAN_FORK_BUG: > # gh-89363: Skip multiprocessing tests if Python is built with ASAN to 84c84,89 < raise unittest.SkipTest("libasan has a pthread_create() dead lock") --- > raise unittest.SkipTest("libasan has a pthread_create() dead lock related to thread+fork") > > > # gh-110666: Tolerate a difference of 100 ms when comparing timings > # (clock resolution) > CLOCK_RES = 0.100 560,561c565 < if os.name != 'nt': < self.assertEqual(exitcode, -signal.SIGTERM) --- > self.assertEqual(exitcode, -signal.SIGTERM) 566a571,572 > else: > self.assertEqual(exitcode, -signal.SIGTERM) 1653c1659 < expected = 0.1 --- > expected = 0.100 1657,1658c1663 < # borrow logic in assertTimeout() from test/lock_tests.py < if not result and expected * 0.6 < dt < expected * 10.0: --- > if not result and (expected - CLOCK_RES) <= dt: 1677c1682 < time.sleep(0.01) --- > time.sleep(0.010) 2436,2437c2441,2445 < def sqr(x, wait=0.0): < time.sleep(wait) --- > def sqr(x, wait=0.0, event=None): > if event is None: > time.sleep(wait) > else: > event.wait(wait) 2576,2579c2584,2595 < res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) < get = TimingWrapper(res.get) < self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) < self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) --- > p = self.Pool(3) > try: > event = threading.Event() if self.TYPE == 'threads' else None > res = p.apply_async(sqr, (6, TIMEOUT2 + support.SHORT_TIMEOUT, event)) > get = TimingWrapper(res.get) > self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) > self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) > finally: > if event is not None: > event.set() > p.terminate() > p.join() 2680,2687c2696,2701 < result = self.pool.map_async( < time.sleep, [0.1 for i in range(10000)], chunksize=1 < ) < self.pool.terminate() < join = TimingWrapper(self.pool.join) < join() < # Sanity check the pool didn't wait for all tasks to finish < self.assertLess(join.elapsed, 2.0) --- > # Simulate slow tasks which take "forever" to complete > p = self.Pool(3) > args = [support.LONG_TIMEOUT for i in range(10_000)] > result = p.map_async(time.sleep, args, chunksize=1) > p.terminate() > p.join() 4872c4886 < time.sleep(random.random()*0.1) --- > time.sleep(random.random() * 0.100) 4912c4926 < time.sleep(random.random()*0.1) --- > time.sleep(random.random() * 0.100) 4961c4975 < expected = 5 --- > timeout = 5.0 # seconds 4965c4979 < res = wait([a, b], expected) --- > res = wait([a, b], timeout) 4969,4970c4983 < self.assertLess(delta, expected * 2) < self.assertGreater(delta, expected * 0.5) --- > self.assertGreater(delta, timeout - CLOCK_RES) 4973,4974d4985 < < start = time.monotonic() 4976,4977d4986 < delta = time.monotonic() - start < 4979d4987 < self.assertLess(delta, 0.4) 5437c5445,5447 < self.assertEqual(results, [2, 1]) --- > # gh-109706: queue.put(1) can write into the queue before queue.put(2), > # there is no synchronization in the test. 
> self.assertSetEqual(set(results), set([2, 1])) uqfoundation-multiprocess-b3457a5/py3.12/_multiprocess/000077500000000000000000000000001455552142400231445ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/_multiprocess/__init__.py000066400000000000000000000005011455552142400252510ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE from _multiprocessing import * uqfoundation-multiprocess-b3457a5/py3.12/doc/000077500000000000000000000000001455552142400210215ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/doc/CHANGES.html000066400000000000000000001133431455552142400227640ustar00rootroot00000000000000 Changelog for processing

Changelog for processing

Changes in 0.52

  • On versions 0.50 and 0.51 Mac OSX Lock.release() would fail with OSError(errno.ENOSYS, "[Errno 78] Function not implemented"). This appears to be because on Mac OSX sem_getvalue() has not been implemented.

    Now sem_getvalue() is no longer needed. Unfortunately, however, on Mac OSX BoundedSemaphore() will not raise ValueError if it exceeds its initial value.

  • Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs.

  • The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias.

Changes in 0.51

  • In 0.50 processing.Value() and processing.sharedctypes.Value() were related but had different signatures, which was rather confusing.

    Now processing.sharedctypes.Value() has been renamed processing.sharedctypes.RawValue() and processing.sharedctypes.Value() is the same as processing.Value().
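
    For illustration only (not part of the original changelog, and written against the present-day multiprocess spelling rather than the 0.51 processing API), the practical difference is that Value() carries a lock while RawValue() does not:

    import multiprocess as mp
    from multiprocess.sharedctypes import RawValue

    counter = mp.Value('i', 0)      # synchronized wrapper: has get_lock()
    raw = RawValue('i', 0)          # bare ctypes object: no lock at all

    with counter.get_lock():
        counter.value += 1
    raw.value += 1                  # the caller must synchronize this themselves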

  • In version 0.50 sendfd() and recvfd() apparently did not work on 64bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40.

    However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of sendfd() and recvfd() by setting macros['HAVE_FD_TRANSFER'] = 0 in setup.py.

  • Fixed an authentication error when using a "remote" manager created using BaseManager.from_address().

  • Fixed a couple of bugs which only affected Python 2.4.

Changes in 0.50

  • ctypes is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately.

  • LocalManager() has been removed.

  • Added processing.Value() and processing.Array() which are similar to LocalManager.SharedValue() and LocalManager.SharedArray().

  • In the sharedctypes module new_value() and new_array() have been renamed Value() and Array().

  • Process.stop(), Process.getStoppable() and Process.setStoppable() have been removed. Use Process.terminate() instead.

  • processing.Lock now matches threading.Lock behaviour more closely: now a thread can release a lock it does not own, and now when a thread tries to acquire a lock it already owns, a deadlock results instead of an exception.

  • On Windows when the main thread is blocking on a method of Lock, RLock, Semaphore, BoundedSemaphore, Condition it will no longer ignore Ctrl-C. (The same was already true on Unix.)

    This differs from the behaviour of the equivalent objects in threading which will completely ignore Ctrl-C.

  • The test sub-package has been replaced by lots of unit tests in a tests sub-package. Some of the old test files have been moved over to a new examples sub-package.

  • On Windows it is now possible for a non-console python program (i.e. one using pythonw.exe instead of python.exe) to use processing.

    Previously an exception was raised when subprocess.py tried to duplicate stdin, stdout, stderr.

  • Proxy objects should now be thread safe -- they now use thread local storage.

  • Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise RuntimeError with a message saying that the object should only be shared between processes using inheritance.

    Previously, this worked unreliably on Windows but would fail with an unexplained AssertionError on Unix.
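
    As a present-day illustration of sharing by inheritance (a sketch using the modern multiprocess spelling, not code from this release): a lock handed to a child at construction time is fine, while sending the same lock over a connection is rejected:

    import multiprocess as mp

    def worker(lock):               # illustrative names only
        with lock:
            print('child acquired the inherited lock')

    if __name__ == '__main__':
        lock = mp.Lock()
        p = mp.Process(target=worker, args=(lock,))   # shared by inheritance: OK
        p.start()
        p.join()

        a, b = mp.Pipe()
        try:
            a.send(lock)            # transferred over a pipe: RuntimeError
        except RuntimeError as exc:
            print(exc)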

  • The names of some of the macros used for compiling the extension have changed. See INSTALL.txt and setup.py.

  • A few changes which (hopefully) make compilation possible on Solaris.

  • Lots of refactoring of the code.

  • Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux).

Changes in 0.40

  • Removed SimpleQueue and PosixQueue types. Just use Queue instead.

  • Previously if you forgot to use the

    if __name__ == '__main__':
        freezeSupport()
        ...
    

    idiom on Windows then processes could be created recursively bringing the computer to its knees. Now RuntimeError will be raised instead.

  • Some refactoring of the code.

  • A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed.

Changes in 0.39

  • One can now create one-way pipes by doing reader, writer = Pipe(duplex=False).
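
    A minimal sketch of such a one-way pipe (using the modern multiprocess spelling; the reader end only receives and the writer end only sends):

    import multiprocess as mp

    def produce(writer):                 # illustrative names only
        writer.send('hello from the child')
        writer.close()

    if __name__ == '__main__':
        reader, writer = mp.Pipe(duplex=False)
        p = mp.Process(target=produce, args=(writer,))
        p.start()
        print(reader.recv())             # 'hello from the child'
        p.join()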

  • Rewrote code for managing shared memory maps.

  • Added a sharedctypes module for creating ctypes objects allocated from shared memory. On Python 2.4 this requires the installation of ctypes.

    ctypes objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a LocalManager.
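
    For example (a sketch only, spelled with today's multiprocess.sharedctypes names rather than the 0.39 processing API), a shared counter guarded by an explicit lock:

    import multiprocess as mp
    from multiprocess.sharedctypes import RawValue

    def bump(counter, lock):             # illustrative names only
        for _ in range(1000):
            with lock:                   # RawValue has no lock of its own
                counter.value += 1

    if __name__ == '__main__':
        counter = RawValue('i', 0)
        lock = mp.Lock()
        procs = [mp.Process(target=bump, args=(counter, lock)) for _ in range(4)]
        for p in procs: p.start()
        for p in procs: p.join()
        print(counter.value)             # 4000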

  • Rearranged documentation.

  • Previously the C extension caused a segfault on 64 bit machines with Python 2.5 because it used int instead of Py_ssize_t in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report.

  • A fix for Pool.terminate().

  • A fix for cleanup behaviour of Queue.

Changes in 0.38

  • Have revamped the queue types. Now the queue types are Queue, SimpleQueue and (on systems which support it) PosixQueue.

    Now Queue should behave just like Python's normal Queue.Queue class except that qsize(), task_done() and join() are not implemented. In particular, if no maximum size was specified when the queue was created then put() will always succeed without blocking.

    A SimpleQueue instance is really just a pipe protected by a couple of locks. It has get(), put() and empty() methods but does not support timeouts or non-blocking.

    BufferedPipeQueue() and PipeQueue() remain as deprecated aliases of Queue() but BufferedPosixQueue() has been removed. (Not sure if we really need to keep PosixQueue()...)

  • Previously the Pool.shutdown() method was a little dodgy -- it could block indefinitely if map() or imap*() were used and did not try to terminate workers while they were doing a task.

    Now there are three new methods close(), terminate() and join() -- shutdown() is retained as a deprecated alias of terminate(). Thanks to Gerald John M. Manipon for feature request/suggested patch to shutdown().

  • Pool.imap() and Pool.imap_unordered() have gained a chunksize argument which allows the iterable to be submitted to the pool in chunks. Choosing chunksize appropriately makes Pool.imap() almost as fast as Pool.map() even for long iterables and cheap functions.
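
    A sketch of the chunksize behaviour in the modern spelling (the numbers are illustrative only):

    import multiprocess as mp

    def square(x):                       # cheap function, long iterable
        return x * x

    if __name__ == '__main__':
        with mp.Pool(4) as pool:
            # submitting work in chunks of 100 keeps per-task overhead low
            total = sum(pool.imap(square, range(10000), chunksize=100))
        print(total)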

  • Previously on Windows, when the cleanup code for a LocalManager attempted to unlink the name of the file which backs the shared memory map, an exception was raised if a child process still existed with a handle open for that mmap. This is likely to happen if a daemon process inherits a LocalManager instance.

    Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin.

  • types.MethodType is registered with copy_reg so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.)

    This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object.
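
    A sketch of what that enables (written against the modern API; the class and values here are arbitrary):

    import multiprocess as mp

    class Greeter:
        def __init__(self, name):
            self.name = name
        def greet(self):
            print('hello from', self.name)

    if __name__ == '__main__':
        g = Greeter('the child process')
        p = mp.Process(target=g.greet)   # bound method as the target callable
        p.start()
        p.join()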

  • On Windows reduction.fromfd() now returns true instances of _socket.socket, so there is no more need for the _processing.falsesocket type.

Changes in 0.37

  • Updated metadata and documentation because the project is now hosted at developer.berlios.de/projects/pyprocessing.
  • The Pool.join() method has been removed. Pool.shutdown() will now join the worker processes automatically.
  • A pool object no longer participates in a reference cycle so Pool.shutdown() should get called as soon as its reference count falls to zero.
  • On Windows if enableLogging() was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, now specifying a handler type in enableLogging() will cause any previous handlers used by the logger to be discarded.

Changes in 0.36

  • In recent versions on Unix the finalizers in a manager process were never given a chance to run before os._exit() was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.

  • The shutting down of managers has been cleaned up.

  • In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed.

  • In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed.

  • Previously the arguments to processing.Semaphore() and processing.BoundedSemaphore() did not have any defaults. The defaults should be 1 to match threading. Fixed.

  • It should now be possible for a Windows Service created by using pywin32 to spawn processes using the processing package.

    Note that pywin32 apparently has a bug meaning that Py_Finalize() is never called when the service exits so functions registered with atexit never get a chance to run. Therefore it is advisable to explicitly call sys.exitfunc() or atexit._run_exitfuncs() at the end of ServiceFramework.DoSvcRun(). Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report.

  • Added getLogger() and enableLogging() to support logging.

Changes in 0.35

  • By default processes are no longer stoppable using the stop() method: one must call setStoppable(True) before start() in order to use the stop() method. (Note that terminate() will work regardless of whether the process is marked as being "stoppable".)

    The reason for this is that on Windows getting stop() to work involves starting a new console for the child process and installing a signal handler for the SIGBREAK signal. This unfortunately means that Ctrl-Break cannot be used to kill all processes of the program.

  • Added setStoppable() and getStoppable() methods -- see above.

  • Added BufferedQueue/BufferedPipeQueue/BufferedPosixQueue. Putting an object on a buffered queue will always succeed without blocking (just like with Queue.Queue if no maximum size is specified). This makes them potentially safer than the normal queue types provided by processing which have finite capacity and may cause deadlocks if they fill.

    test/test_worker.py has been updated to use BufferedQueue for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock.

  • Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of sem_timedwait(). This means that Condition.wait() and Queue.get() should now work with timeouts on Mac OS X.

  • Added a callback argument to Pool.apply_async().
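
    For example (a sketch in the modern spelling; the callback runs in the parent process once each result is ready):

    import multiprocess as mp

    def cube(x):                          # illustrative names only
        return x ** 3

    if __name__ == '__main__':
        results = []
        with mp.Pool(2) as pool:
            for i in range(5):
                pool.apply_async(cube, (i,), callback=results.append)
            pool.close()
            pool.join()
        print(sorted(results))            # [0, 1, 8, 27, 64]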

  • Added test/test_httpserverpool.py which runs a pool of http servers which share a single listening socket.

  • Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket.

  • Fixed bug in the iterator returned by Pool.imap().

  • Fixed bug in Condition.__repr__().

  • Fixed a handle/file descriptor leak when sockets or connections are unpickled.

Changes in 0.34

  • Although in version 0.33 the C extension would compile on Mac OSX, trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the ImportError exception was silently swallowed.

    This is now fixed by using the NO_SEM_TIMED macro. Unfortunately this means that some methods like Condition.wait() and Queue.get() will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing.

  • Added a terminate() method to process objects which is more forceful than stop().

  • Fixed bug in the cleanup function registered with atexit which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. Thanks to Dominique Wahli for report and testing.

  • Added test/test_workers.py which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another.

  • Added processing.Pool() which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the map() builtin. This is still experimental and undocumented --- see test/test_pool.py for example usage.

Changes in 0.33

  • Added a recvbytes_into() method for receiving byte data into objects with the writable buffer interface. Also renamed the _recv_string() and _send_string() methods of connection objects to recvbytes() and sendbytes().

  • Some optimizations for the transferring of large blocks of data using connection objects.

  • On Unix os.sysconf() is now used by default to determine whether to compile in support for posix semaphores or posix message queues.

    By using the NO_SEM_TIMED and NO_MQ_TIMED macros (see INSTALL.txt) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions sem_timedwait() or mq_timedreceive() and mq_timesend().

  • gettimeofday() is now used instead of clock_gettime() making compilation of the C extension (hopefully) possible on Mac OSX. No modification of setup.py should be necessary. Thanks to Michele Bertoldi for report and proposed patch.

  • cpuCount() function added which returns the number of CPUs in the system.

  • Bugfixes to PosixQueue class.

Changes in 0.32

  • Refactored and simplified _nonforking module -- info about sys.modules of parent process is no longer passed on to child process. Also pkgutil is no longer used.
  • Allocated space from an mmap used by LocalManager will now be recycled.
  • Better tests for LocalManager.
  • Fixed bug in managers.py concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report.
  • Added a freezeSupport() function. Calling this at the appropriate point in the main module is necessary when freezing a multiprocess program to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

Changes in 0.31

  • Fixed one line bug in localmanager.py which caused shared memory maps not to be resized properly.
  • Added tests for shared values/structs/arrays to test/test_processing.

Changes in 0.30

  • Process objects now support the complete API of thread objects.

    In particular isAlive(), isDaemon(), setDaemon() have been added and join() now supports the timeout parameter.

    There are also new methods stop(), getPid() and getExitCode().
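
    For reference, a rough sketch of the same thread-like surface as it looks in the modern API (isAlive()/getPid()/getExitCode() correspond to today's is_alive(), pid and exitcode):

    import time
    import multiprocess as mp

    def nap():                            # illustrative names only
        time.sleep(1)

    if __name__ == '__main__':
        p = mp.Process(target=nap, name='napper')
        p.start()
        p.join(timeout=0.1)               # join() accepts a timeout, as with threads
        print(p.is_alive(), p.pid)        # almost certainly still alive here
        p.join()
        print(p.exitcode)                 # 0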

  • Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores.

  • Added support for sharing simple objects between processes by using a shared memory map and the struct or array modules.

  • An activeChildren() function has been added to processing which returns a list of the child processes which are still alive.

  • A Pipe() function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent.

  • socket objects etc are now picklable and can be transferred between processes. (Requires compilation of the _processing extension.)

  • Subclasses of managers.BaseManager no longer automatically spawn a child process when an instance is created: the start() method must be called explicitly.

  • On Windows child processes are now spawned using subprocess.

  • On Windows the Python 2.5 version of pkgutil is now used for loading modules by the _nonforking module. On Python 2.4 this version of pkgutil (which uses the standard Python licence) is included in processing.compat.

  • The arguments to the functions in processing.connection have changed slightly.

  • Connection objects now have a poll() method which tests whether there is any data available for reading.
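
    A sketch of poll() on a connection pair (modern spelling):

    import multiprocess as mp

    a, b = mp.Pipe()
    print(a.poll())                       # False: nothing to read yet
    b.send({'answer': 42})
    print(a.poll(1.0))                    # True: data arrived within the timeout
    print(a.recv())                       # {'answer': 42}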

  • The test/py2exedemo folder shows how to get py2exe to create a Windows executable from a program using the processing package.

  • More tests.

  • Bugfixes.

  • Rearrangement of various stuff.

Changes in 0.21

  • By default a proxy is now only able to access those methods of its referent which have been explicitly exposed.
  • The connection sub-package now supports digest authentication.
  • Process objects are now given randomly generated 'inheritable' authentication keys.
  • A manager process will now only accept connections from processes using the same authentication key.
  • Previously get_module() from _nonforking.py was seriously messed up (though it generally worked). It is a lot saner now.
  • Python 2.4 or higher is now required.

Changes in 0.20

  • The doc folder contains HTML documentation.
  • test is now a subpackage. Running processing.test.main() will run test scripts using both processes and threads.
  • nonforking.py has been renamed _nonforking.py. manager.py has been renamed managers.py. connection.py has become a sub-package connection.
  • Listener and Client have been removed from processing, but still exist in processing.connection.
  • The package is now probably compatible with versions of Python earlier than 2.4.
  • set is no longer a type supported by the default manager type.
  • Many more changes.

Changes in 0.12

  • Fixed bug where the arguments to processing.Manager() were passed on to processing.manager.DefaultManager() in the wrong order.
  • processing.dummy is now a subpackage of processing instead of a module.
  • Rearranged package so that the test folder, README.txt and CHANGES.txt are copied when the package is installed.

Changes in 0.11

  • Fixed bug on Windows when the full path of nonforking.py contains a space.
  • On Unix there is no longer a need to make the arguments to the constructor of Process be picklable or for an instance of a subclass of Process to be picklable when you call the start method.
  • On Unix proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to Process. (This will never be possible on Windows.)
uqfoundation-multiprocess-b3457a5/py3.12/doc/COPYING.html000066400000000000000000000040211455552142400230140ustar00rootroot00000000000000

Copyright (c) 2006-2008, R Oudkerk

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

uqfoundation-multiprocess-b3457a5/py3.12/doc/INSTALL.html000066400000000000000000000063531455552142400230240ustar00rootroot00000000000000 Installation of processing

Installation of processing

Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the ctypes package (which comes automatically with Python 2.5).

Windows binary builds for Python 2.4 and Python 2.5 are available at

http://pyprocessing.berlios.de

or

http://pypi.python.org/pypi/processing

Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:

python setup.py install

It should not be necessary to do any editing of setup.py if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the macros dictionary or libraries list. The section to modify reads

else:
    macros = dict(
        HAVE_SEM_OPEN=1,
        HAVE_SEM_TIMEDWAIT=1,
        HAVE_FD_TRANSFER=1
        )
    libraries = ['rt']

More details can be found in the comments in setup.py.

Note that if you use HAVE_SEM_OPEN=0 then support for posix semaphores will not be compiled in, and then many of the functions in the processing namespace like Lock() or Queue() will not be available. However, one can still create a manager using manager = processing.Manager() and then do lock = manager.Lock() etc.
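
For example, a minimal sketch of that fallback, using only the manager methods documented elsewhere in this package's docs, might look like:

import processing

manager = processing.Manager()        # works even without posix semaphore support
lock = manager.Lock()                 # proxy for a lock held in the manager process
queue = manager.Queue()               # proxy for a queue held in the manager process

lock.acquire()
try:
    queue.put('hello')
finally:
    lock.release()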

Running tests

To run the test scripts using Python 2.5 do

python -m processing.tests

and on Python 2.4 do

python -c "from processing.tests import main; main()"

This will run a number of test scripts using both processes and threads.

uqfoundation-multiprocess-b3457a5/py3.12/doc/THANKS.html000066400000000000000000000017751455552142400227110ustar00rootroot00000000000000 Thanks

Thanks

Thanks to everyone who has offered bug reports, patches, suggestions:

Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.

Sorry if I have forgotten anyone.

uqfoundation-multiprocess-b3457a5/py3.12/doc/__init__.py000066400000000000000000000004001455552142400231240ustar00rootroot00000000000000import os import webbrowser def main(): ''' Show html documentation using webbrowser ''' index_html = os.path.join(os.path.dirname(__file__), 'index.html') webbrowser.open(index_html) if __name__ == '__main__': main() uqfoundation-multiprocess-b3457a5/py3.12/doc/connection-objects.html000066400000000000000000000152041455552142400254770ustar00rootroot00000000000000 Connection objects
Prev         Up         Next

Connection objects

Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.

Connection objects are usually created using processing.Pipe() -- see also Listener and Clients.

Connection objects have the following methods:

send(obj)

Send an object to the other end of the connection which should be read using recv().

The object must be picklable.

recv()
Return an object sent from the other end of the connection using send(). Raises EOFError if there is nothing left to receive and the other end was closed.
fileno()
Returns the file descriptor or handle used by the connection.
close()

Close the connection.

This is called automatically when the connection is garbage collected.

poll(timeout=0.0)

Return whether there is any data available to be read within timeout seconds.

If timeout is None then an infinite timeout is used.

Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C.

sendBytes(buffer)

Send byte data from an object supporting the buffer interface as a complete message.

Can be used to send strings or a view returned by buffer().

recvBytes()
Return a complete message of byte data sent from the other end of the connection as a string. Raises EOFError if there is nothing left to receive and the other end was closed.
recvBytesInto(buffer, offset=0)

Read into buffer at position offset a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises EOFError if there is nothing left to receive and the other end was closed.

buffer must be an object satisfying the writable buffer interface and offset must be non-negative and less than the length of buffer (in bytes).

If the buffer is too short then a BufferTooShort exception is raised and the complete message is available as e.args[0] where e is the exception instance.

For example:

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.sendBytes('thank you')
>>> a.recvBytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.sendBytes(arr1)
>>> count = b.recvBytesInto(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
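
The poll() method is not shown above; a minimal sketch of its use, assuming the same pair of connection objects, might look like:

>>> b.poll()                # nothing is waiting to be read
False
>>> a.send('ping')
>>> b.poll()                # data is now available
True
>>> b.recv()
'ping'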

Warning

The recv() method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message.

Therefore, unless the connection object was produced using Pipe() you should only use the recv() and send() methods after performing some sort of authentication. See Authentication keys.

Warning

If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie.

uqfoundation-multiprocess-b3457a5/py3.12/doc/connection-objects.txt000066400000000000000000000072761455552142400253640ustar00rootroot00000000000000.. include:: header.txt ==================== Connection objects ==================== Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects usually created using `processing.Pipe()` -- see also `Listener and Clients `_. Connection objects have the following methods: `send(obj)` Send an object to the other end of the connection which should be read using `recv()`. The object must be picklable. `recv()` Return an object sent from the other end of the connection using `send()`. Raises `EOFError` if there is nothing left to receive and the other end was closed. `fileno()` Returns the file descriptor or handle used by the connection. `close()` Close the connection. This is called automatically when the connection is garbage collected. `poll(timeout=0.0)` Return whether there is any data available to be read within `timeout` seconds. If `timeout` is `None` then an infinite timeout is used. Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C. `sendBytes(buffer)` Send byte data from an object supporting the buffer interface as a complete message. Can be used to send strings or a view returned by `buffer()`. `recvBytes()` Return a complete message of byte data sent from the other end of the connection as a string. Raises `EOFError` if there is nothing left to receive and the other end was closed. `recvBytesInto(buffer, offset=0)` Read into `buffer` at position `offset` a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises `EOFError` if there is nothing left to receive and the other end was closed. `buffer` must be an object satisfying the writable buffer interface and `offset` must be non-negative and less than the length of `buffer` (in bytes). If the buffer is too short then a `BufferTooShort` exception is raised and the complete message is available as `e.args[0]` where `e` is the exception instance. For example: >>> from processing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.sendBytes('thank you') >>> a.recvBytes() 'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.sendBytes(arr1) >>> count = b.recvBytesInto(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The `recv()` method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using `Pipe()` you should only use the `recv()` and `send()` methods after performing some sort of authentication. See `Authentication keys `_. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie. .. _Prev: queue-objects.html .. _Up: processing-ref.html .. _Next: manager-objects.html uqfoundation-multiprocess-b3457a5/py3.12/doc/connection-ref.html000066400000000000000000000357371455552142400246370ustar00rootroot00000000000000 Listeners and Clients
Prev         Up         Next

Listeners and Clients

Usually message passing between processes is done using queues or by using connection objects returned by Pipe().

However, the processing.connection module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for digest authentication using the hmac module from the standard library.

Classes and functions

The module defines the following functions:

Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)
Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections.
Client(address, family=None, authenticate=False, authkey=None)

Attempts to set up a connection to the listener which is using address address, returning a connection object.

The type of the connection is determined by the family argument, but this can generally be omitted since it can usually be inferred from the format of address.

If authenticate is true or authkey is a string then digest authentication is used. The key used for authentication will be either authkey or currentProcess().getAuthKey() if authkey is None. If authentication fails then AuthenticationError is raised. See Authentication keys.

The module exports two exception types:

exception AuthenticationError
Exception raised when there is an authentication error.
exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Listener objects

Instances of Listener have the following methods:

__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)
address
The address to be used by the bound socket or named pipe of the listener object.
family

The type of the socket (or named pipe) to use.

This can be one of the strings 'AF_INET' (for a TCP socket), 'AF_UNIX' (for a Unix domain socket) or 'AF_PIPE' (for a Windows named pipe). Of these only the first is guaranteed to be available.

If family is None then the family is inferred from the format of address. If address is also None then a default is chosen. This default is the family which is assumed to be the fastest available. See Address formats.

Note that if family is 'AF_UNIX' then the associated file will only be readable/writable by the user running the current process -- use os.chmod() if you need to let other users access the socket.

backlog
If the listener object uses a socket then backlog is passed to the listen() method of the socket once it has been bound.
authenticate
If authenticate is true or authkey is not None then digest authentication is used.
authkey

If authkey is a string then it will be used as the authentication key; otherwise it must be None.

If authkey is None and authenticate is true then currentProcess().getAuthKey() is used as the authentication key.

If authkey is None and authenticate is false then no authentication is done.

If authentication fails then AuthenticationError is raised. See Authentication keys.

accept()

Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then AuthenticationError is raised.

Returns a connection object.

close()

Close the bound socket or named pipe of the listener object.

This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.

Listener objects have the following read-only properties:

address
The address which is being used by the listener object.
last_accepted

The address from which the last accepted connection came.

If this is unavailable then None is returned.

Address formats

  • An 'AF_INET' address is a tuple of the form (hostname, port) where hostname is a string and port is an integer

  • An 'AF_UNIX' address is a string representing a filename on the filesystem.

  • An 'AF_PIPE' address is a string of the form r'\\.\pipe\PipeName'.

    To use Client to connect to a named pipe on a remote computer called ServerName one should use an address of the form r'\\ServerName\pipe\PipeName' instead.

Note that any string beginning with two backslashes is assumed by default to be an 'AF_PIPE' address rather than an 'AF_UNIX' address.
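
As an illustrative sketch, these formats would be passed to Listener() as follows (the socket path and pipe name below are placeholders):

from processing.connection import Listener

l1 = Listener(('localhost', 6000))       # 'AF_INET' inferred from the tuple
l2 = Listener('/tmp/my_socket')          # 'AF_UNIX' inferred from the filename (Unix only)
l3 = Listener(r'\\.\pipe\my_pipe')       # 'AF_PIPE' (Windows only)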

Authentication keys

When one uses the recv() method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore Listener and Client use the hmac module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does not involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the return value of currentProcess().getAuthKey() is used (see Process objects). This value will automatically be inherited by any Process object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves.

Suitable authentication keys can also be generated by using os.urandom().

Example

The following server code creates a listener which uses 'secret password' as an authentication key. It then waits for a connection and sends some data to the client:

from processing.connection import Listener
from array import array

address = ('localhost', 6000)     # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')

conn = listener.accept()
print 'connection accepted from', listener.last_accepted

conn.send([2.25, None, 'junk', float])

conn.sendBytes('hello')

conn.sendBytes(array('i', [42, 1729]))

conn.close()
listener.close()

The following code connects to the server and receives some data from the server:

from processing.connection import Client
from array import array

address = ('localhost', 6000)
conn = Client(address, authkey='secret password')

print conn.recv()                 # => [2.25, None, 'junk', float]

print conn.recvBytes()            # => 'hello'

arr = array('i', [0, 0, 0, 0, 0])
print conn.recvBytesInto(arr)    # => 8
print arr                         # => array('i', [42, 1729, 0, 0, 0])

conn.close()
uqfoundation-multiprocess-b3457a5/py3.12/doc/connection-ref.txt000066400000000000000000000210001455552142400244640ustar00rootroot00000000000000.. include:: header.txt ======================= Listeners and Clients ======================= Usually message passing between processes is done using queues or by using connection objects returned by `Pipe()`. However, the `processing.connection` module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for *digest authentication* using the `hmac` module from the standard library. Classes and functions ===================== The module defines the following functions: `Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)` Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections. `Client(address, family=None, authenticate=False, authkey=None)` Attempts to set up a connection to the listener which is using address `address`, returning a `connection object `_. The type of the connection is determined by `family` argument, but this can generally be omitted since it can usually be inferred from the format of `address`. If `authentication` or `authkey` is a string then digest authentication is used. The key used for authentication will be either `authkey` or `currentProcess.getAuthKey()` if `authkey` is `None`. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. .. `deliverChallenge(connection, authkey)` Sends a randomly generated message to the other end of the connection and waits for a reply. If the reply matches the digest of the message using `authkey` as the key then a welcome message is sent to the other end of the connection. Otherwise `AuthenticationError` is raised. `answerChallenge(connection, authkey)` Receives a message, calculates the digest of the message using `authkey` as the key, and then sends the digest back. If a welcome message is not received then `AuthenticationError` is raised. The module exports two exception types: **exception** `AuthenticationError` Exception raised when there is an authentication error. **exception** `BufferTooShort` Exception raise by the `recvBytesInto()` method of a connection object when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Listener objects ================ Instances of `Listener` have the following methods: `__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)` `address` The address to be used by the bound socket or named pipe of the listener object. `family` The type of the socket (or named pipe) to use. This can be one of the strings `'AF_INET'` (for a TCP socket), `'AF_UNIX'` (for a Unix domain socket) or `'AF_PIPE'` (for a Windows named pipe). Of these only the first is guaranteed to be available. If `family` is `None` than the family is inferred from the format of `address`. If `address` is also `None` then a default is chosen. This default is the family which is assumed to be the fastest available. See `Address formats`_. Note that if `family` is `'AF_UNIX'` then the associated file will have only be readable/writable by the user running the current process -- use `os.chmod()` is you need to let other users access the socket. `backlog` If the listener object uses a socket then `backlog` is passed to the `listen()` method of the socket once it has been bound. 
`authenticate` If `authenticate` is true or `authkey` is not `None` then digest authentication is used. `authkey` If `authkey` is a string then it will be used as the authentication key; otherwise it must be `None`. If `authkey` is `None` and `authenticate` is true then `currentProcess.getAuthKey()` is used as the authentication key. If `authkey` is `None` and `authentication` is false then no authentication is done. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. `accept()` Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then `AuthenticationError` is raised. Returns a `connection object ` object. `close()` Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: `address` The address which is being used by the listener object. `last_accepted` The address from which the last accepted connection came. If this is unavailable then `None` is returned. Address formats =============== * An `'AF_INET'` address is a tuple of the form `(hostname, port)` where `hostname` is a string and `port` is an integer * An `'AF_UNIX'` address is a string representing a filename on the filesystem. * An `'AF_PIPE'` address is a string of the form `r'\\\\.\\pipe\\PipeName'`. To use `Client` to connect to a named pipe on a remote computer called `ServerName` one should use an address of the form `r'\\\\ServerName\\pipe\\PipeName'` instead. Note that any string beginning with two backslashes is assumed by default to be an `'AF_PIPE'` address rather than an `'AF_UNIX'` address. Authentication keys =================== When one uses the `recv()` method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore `Listener` and `Client` use the `hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does *not* involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of `currentProcess().getAuthKey()` is used (see `Process objects `_). This value will automatically inherited by any `Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between the themselves. Suitable authentication keys can also be generated by using `os.urandom()`. Example ======= The following server code creates a listener which uses `'secret password'` as an authentication key. 
It then waits for a connection and sends some data to the client:: from processing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey='secret password') conn = listener.accept() print 'connection accepted from', listener.last_accepted conn.send([2.25, None, 'junk', float]) conn.sendBytes('hello') conn.sendBytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from processing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey='secret password') print conn.recv() # => [2.25, None, 'junk', float] print conn.recvBytes() # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print conn.recvBytesInto(arr) # => 8 print arr # => array('i', [42, 1729, 0, 0, 0]) conn.close() .. _Prev: sharedctypes.html .. _Up: processing-ref.html .. _Next: programming-guidelines.html uqfoundation-multiprocess-b3457a5/py3.12/doc/header.txt000066400000000000000000000003401455552142400230070ustar00rootroot00000000000000.. default-role:: literal .. header:: Prev_ |spaces| Up_ |spaces| Next_ .. footer:: Prev_ |spaces| Up_ |spaces| Next_ .. |nbsp| unicode:: U+000A0 .. |spaces| replace:: |nbsp| |nbsp| |nbsp| |nbsp| uqfoundation-multiprocess-b3457a5/py3.12/doc/html4css1.css000066400000000000000000000126361455552142400233650ustar00rootroot00000000000000/* :Author: David Goodger :Contact: goodger@users.sourceforge.net :Date: $Date: 2008/01/29 22:14:02 $ :Revision: $Revision: 1.1.1.1 $ :Copyright: This stylesheet has been placed in the public domain. Default cascading style sheet for the HTML output of Docutils. See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to customize this style sheet. */ /* used to remove borders from tables and images */ .borderless, table.borderless td, table.borderless th { border: 0 } table.borderless td, table.borderless th { /* Override padding for "table.docutils td" with "! important". The right padding separates the table cells. */ padding: 0 0.5em 0 0 ! important } .first { /* Override more specific margin styles with "! important". */ margin-top: 0 ! important } .last, .with-subtitle { margin-bottom: 0 ! important } .hidden { display: none } a.toc-backref { text-decoration: none ; color: black } blockquote.epigraph { margin: 2em 5em ; } dl.docutils dd { margin-bottom: 0.5em } /* Uncomment (and remove this text!) to get bold-faced definition list terms dl.docutils dt { font-weight: bold } */ div.abstract { margin: 2em 5em } div.abstract p.topic-title { font-weight: bold ; text-align: center } div.admonition, div.attention, div.caution, div.danger, div.error, div.hint, div.important, div.note, div.tip, div.warning { margin: 2em ; border: medium outset ; padding: 1em } div.admonition p.admonition-title, div.hint p.admonition-title, div.important p.admonition-title, div.note p.admonition-title, div.tip p.admonition-title { font-weight: bold ; font-family: sans-serif } div.attention p.admonition-title, div.caution p.admonition-title, div.danger p.admonition-title, div.error p.admonition-title, div.warning p.admonition-title { color: red ; font-weight: bold ; font-family: sans-serif } /* Uncomment (and remove this text!) to get reduced vertical space in compound paragraphs. 
div.compound .compound-first, div.compound .compound-middle { margin-bottom: 0.5em } div.compound .compound-last, div.compound .compound-middle { margin-top: 0.5em } */ div.dedication { margin: 2em 5em ; text-align: center ; font-style: italic } div.dedication p.topic-title { font-weight: bold ; font-style: normal } div.figure { margin-left: 2em ; margin-right: 2em } div.footer, div.header { clear: both; font-size: smaller } div.line-block { display: block ; margin-top: 1em ; margin-bottom: 1em } div.line-block div.line-block { margin-top: 0 ; margin-bottom: 0 ; margin-left: 1.5em } div.sidebar { margin-left: 1em ; border: medium outset ; padding: 1em ; background-color: #ffffee ; width: 40% ; float: right ; clear: right } div.sidebar p.rubric { font-family: sans-serif ; font-size: medium } div.system-messages { margin: 5em } div.system-messages h1 { color: red } div.system-message { border: medium outset ; padding: 1em } div.system-message p.system-message-title { color: red ; font-weight: bold } div.topic { margin: 2em } h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { margin-top: 0.4em } h1.title { text-align: center } h2.subtitle { text-align: center } hr.docutils { width: 75% } img.align-left { clear: left } img.align-right { clear: right } ol.simple, ul.simple { margin-bottom: 1em } ol.arabic { list-style: decimal } ol.loweralpha { list-style: lower-alpha } ol.upperalpha { list-style: upper-alpha } ol.lowerroman { list-style: lower-roman } ol.upperroman { list-style: upper-roman } p.attribution { text-align: right ; margin-left: 50% } p.caption { font-style: italic } p.credits { font-style: italic ; font-size: smaller } p.label { white-space: nowrap } p.rubric { font-weight: bold ; font-size: larger ; color: maroon ; text-align: center } p.sidebar-title { font-family: sans-serif ; font-weight: bold ; font-size: larger } p.sidebar-subtitle { font-family: sans-serif ; font-weight: bold } p.topic-title { font-weight: bold } pre.address { margin-bottom: 0 ; margin-top: 0 ; font-family: serif ; font-size: 100% } pre.literal-block, pre.doctest-block { margin-left: 2em ; margin-right: 2em ; background-color: #eeeeee } span.classifier { font-family: sans-serif ; font-style: oblique } span.classifier-delimiter { font-family: sans-serif ; font-weight: bold } span.interpreted { font-family: sans-serif } span.option { white-space: nowrap } span.pre { white-space: pre } span.problematic { color: red } span.section-subtitle { /* font-size relative to parent (h1..h6 element) */ font-size: 80% } table.citation { border-left: solid 1px gray; margin-left: 1px } table.docinfo { margin: 2em 4em } table.docutils { margin-top: 0.5em ; margin-bottom: 0.5em } table.footnote { border-left: solid 1px black; margin-left: 1px } table.docutils td, table.docutils th, table.docinfo td, table.docinfo th { padding-left: 0.5em ; padding-right: 0.5em ; vertical-align: top } table.docutils th.field-name, table.docinfo th.docinfo-name { font-weight: bold ; text-align: left ; white-space: nowrap ; padding-left: 0 } h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { font-size: 100% } /* tt.docutils { background-color: #eeeeee } */ ul.auto-toc { list-style-type: none } uqfoundation-multiprocess-b3457a5/py3.12/doc/index.html000066400000000000000000000064761455552142400230330ustar00rootroot00000000000000 Documentation for processing-0.52
Prev         Up         Next
uqfoundation-multiprocess-b3457a5/py3.12/doc/index.txt000066400000000000000000000021751455552142400226760ustar00rootroot00000000000000.. include:: header.txt .. include:: version.txt ======================================== Documentation for processing-|version| ======================================== :Author: R Oudkerk :Contact: roudkerk at users.berlios.de :Url: http://developer.berlios.de/projects/pyprocessing :Licence: BSD Licence Contents ======== * `Introduction `_ * `Package reference `_ + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes objects `_ + `Listeners and Clients `_ * `Programming guidelines `_ * `Tests and examples `_ See also ======== * `Installation instructions `_ * `Changelog `_ * `Acknowledgments `_ * `Licence `_ .. _Next: intro.html .. _Up: index.html .. _Prev: index.html uqfoundation-multiprocess-b3457a5/py3.12/doc/intro.html000066400000000000000000000427461455552142400230570ustar00rootroot00000000000000 Introduction
Prev         Up         Next

Introduction

Threads, processes and the GIL

To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads.

Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient.

On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other.

CPython has a Global Interpreter Lock (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C.

One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead.

Python gives little direct support for writing programs using multiple processes. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs.

Forking and spawning

There are two ways of creating a new process in Python:

  • The current process can fork a new child process by using the os.fork() function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits copies of all variables that the parent process had.

    However, os.fork() is not available on every platform: in particular Windows does not support it.

  • Alternatively, the current process can spawn a completely new Python interpreter by using the subprocess module or one of the os.spawn*() functions.

    Getting this new interpreter into a fit state to perform the task set for it by its parent process is, however, a bit of a challenge.

The processing package uses os.fork() if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process.
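
As a minimal sketch of this behaviour (Unix only, using os.fork() directly rather than the processing package):

import os

data = ['parent', 'state']

pid = os.fork()
if pid == 0:
    # child process: it sees a *copy* of the parent's variables
    data.append('changed in child')   # has no effect on the parent's list
    os._exit(0)
else:
    os.waitpid(pid, 0)                # wait for the child to finish
    print data                        # still ['parent', 'state']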

The Process class

In the processing package processes are spawned by creating a Process object and then calling its start() method. processing.Process follows the API of threading.Thread. A trivial example of a multiprocess program is

from processing import Process

def f(name):
    print 'hello', name

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

Here the function f is run in a child process.

For an explanation of why (on Windows) the if __name__ == '__main__' part is necessary see Programming guidelines.

Exchanging objects between processes

processing supports two types of communication channel between processes:

Queues:

The function Queue() returns a near clone of Queue.Queue -- see the Python standard documentation. For example

from processing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()

Queues are thread and process safe. See Queues.

Pipes:

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example

from processing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print parent_conn.recv()   # prints "[42, None, 'hello']"
    p.join()

The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See Pipes.

Synchronization between processes

processing contains equivalents of all the synchronization primitives from threading. For instance one can use a lock to ensure that only one process prints to standard output at a time:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()

    for num in range(10):
        Process(target=f, args=(lock, num)).start()

Without using the lock output from the different processes is liable to get all mixed up.

Sharing state between processes

As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes.

However, if you really do need to use some shared data then processing provides a couple of ways of doing so.

Shared memory:

Data can be stored in a shared memory map using Value or Array. For example the following code

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

will print

3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

The 'd' and 'i' arguments used when creating num and arr are typecodes of the kind used by the array module: 'd' indicates a double precision float and 'i' indicates a signed integer. These shared objects will be process and thread safe.

For more flexibility in using shared memory one can use the processing.sharedctypes module which supports the creation of arbitrary ctypes objects allocated from shared memory.

Server process:

A manager object returned by Manager() controls a server process which holds python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue, Value and Array. For example:

from processing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    print d
    print l

will print

{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Creating managers which support other types is not hard --- see Customized managers.

Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See Server process managers.

Using a pool of workers

The Pool() function returns an object representing a pool of worker processes. It has methods which allow tasks to be offloaded to the worker processes in a few different ways.

For example:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes
    result = pool.applyAsync(f, [10])     # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow
    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

See Process pools.

Speed

The following benchmarks were performed on a single core Pentium 4, 2.5GHz laptop running Windows XP and Ubuntu Linux 6.10 --- see benchmarks.py.

Number of 256 byte string objects passed between processes/threads per sec:

Connection type Windows Linux
Queue.Queue 49,000 17,000-50,000 [1]
processing.Queue 22,000 21,000
Queue managed by server 6,900 6,500
processing.Pipe 52,000 57,000
[1] For some reason the performance of Queue.Queue is very variable on Linux.

Number of acquires/releases of a lock per sec:

Lock type Windows Linux
threading.Lock 850,000 560,000
processing.Lock 420,000 510,000
Lock managed by server 10,000 8,400
threading.RLock 93,000 76,000
processing.RLock 420,000 500,000
RLock managed by server 8,800 7,400

Number of interleaved waits/notifies per sec on a condition variable by two processes:

Condition type Windows Linux
threading.Condition 27,000 31,000
processing.Condition 26,000 25,000
Condition managed by server 6,600 6,000

Number of integers retrieved from a sequence per sec:

Sequence type Windows Linux
list 6,400,000 5,100,000
unsynchronized shared array 3,900,000 3,100,000
synchronized shared array 200,000 220,000
list managed by server 20,000 17,000
uqfoundation-multiprocess-b3457a5/py3.12/doc/intro.txt000066400000000000000000000301551455552142400227210ustar00rootroot00000000000000.. include:: header.txt ============== Introduction ============== Threads, processes and the GIL ============================== To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads. Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient. On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other. CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C. One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead. Python gives little direct support for writing programs using multiple process. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs. Forking and spawning ==================== There are two ways of creating a new process in Python: * The current process can *fork* a new child process by using the `os.fork()` function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits *copies* of all variables that the parent process had. However, `os.fork()` is not available on every platform: in particular Windows does not support it. * Alternatively, the current process can spawn a completely new Python interpreter by using the `subprocess` module or one of the `os.spawn*()` functions. Getting this new interpreter in to a fit state to perform the task set for it by its parent process is, however, a bit of a challenge. The `processing` package uses `os.fork()` if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process. The Process class ================= In the `processing` package processes are spawned by creating a `Process` object and then calling its `start()` method. `processing.Process` follows the API of `threading.Thread`. A trivial example of a multiprocess program is :: from processing import Process def f(name): print 'hello', name if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() Here the function `f` is run in a child process. For an explanation of why (on Windows) the `if __name__ == '__main__'` part is necessary see `Programming guidelines `_. 
Exchanging objects between processes ==================================== `processing` supports two types of communication channel between processes: **Queues**: The function `Queue()` returns a near clone of `Queue.Queue` -- see the Python standard documentation. For example :: from processing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print q.get() # prints "[42, None, 'hello']" p.join() Queues are thread and process safe. See `Queues `_. **Pipes**: The `Pipe()` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example :: from processing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print parent_conn.recv() # prints "[42, None, 'hello']" p.join() The two connection objects returned by `Pipe()` represent the two ends of the pipe. Each connection object has `send()` and `recv()` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See `Pipes `_. Synchronization between processes ================================= `processing` contains equivalents of all the synchronization primitives from `threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from processing import Process, Lock def f(l, i): l.acquire() print 'hello world', i l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes =============================== As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then `processing` provides a couple of ways of doing so. **Shared memory**: Data can be stored in a shared memory map using `Value` or `Array`. For example the following code :: from processing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print num.value print arr[:] will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The `'d'` and `'i'` arguments used when creating `num` and `arr` are typecodes of the kind used by the `array` module: `'d'` indicates a double precision float and `'i'` inidicates a signed integer. These shared objects will be process and thread safe. For more flexibility in using shared memory one can use the `processing.sharedctypes` module which supports the creation of arbitrary `ctypes objects allocated from shared memory `_. **Server process**: A manager object returned by `Manager()` controls a server process which holds python objects and allows other processes to manipulate them using proxies. A manager returned by `Manager()` will support types `list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value` and `Array`. 
For example:: from processing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print d print l will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Creating managers which support other types is not hard --- see `Customized managers `_. Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See `Server process managers `_. Using a pool of workers ======================= The `Pool()` function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" See `Process pools `_. Speed ===== The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see `benchmarks.py <../examples/benchmarks.py>`_. *Number of 256 byte string objects passed between processes/threads per sec*: ================================== ========== ================== Connection type Windows Linux ================================== ========== ================== Queue.Queue 49,000 17,000-50,000 [1]_ processing.Queue 22,000 21,000 Queue managed by server 6,900 6,500 processing.Pipe 52,000 57,000 ================================== ========== ================== .. [1] For some reason the performance of `Queue.Queue` is very variable on Linux. *Number of acquires/releases of a lock per sec*: ============================== ========== ========== Lock type Windows Linux ============================== ========== ========== threading.Lock 850,000 560,000 processing.Lock 420,000 510,000 Lock managed by server 10,000 8,400 threading.RLock 93,000 76,000 processing.RLock 420,000 500,000 RLock managed by server 8,800 7,400 ============================== ========== ========== *Number of interleaved waits/notifies per sec on a condition variable by two processes*: ============================== ========== ========== Condition type Windows Linux ============================== ========== ========== threading.Condition 27,000 31,000 processing.Condition 26,000 25,000 Condition managed by server 6,600 6,000 ============================== ========== ========== *Number of integers retrieved from a sequence per sec*: ============================== ========== ========== Sequence type Windows Linux ============================== ========== ========== list 6,400,000 5,100,000 unsynchornized shared array 3,900,000 3,100,000 synchronized shared array 200,000 220,000 list managed by server 20,000 17,000 ============================== ========== ========== .. _Prev: index.html .. _Up: index.html .. _Next: processing-ref.html uqfoundation-multiprocess-b3457a5/py3.12/doc/manager-objects.html000066400000000000000000000440461455552142400247600ustar00rootroot00000000000000 Manager objects
Prev         Up         Next

Manager objects

A manager object controls a server process which manages shared objects. Other processes can access the shared objects by using proxies.

Manager processes will be shut down as soon as they are garbage collected or their parent process exits. The manager classes are defined in the processing.managers module.

BaseManager

BaseManager is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects.

The public methods of BaseManager are the following:

__init__(self, address=None, authkey=None)

Creates a manager object.

Once created one should call start() or serveForever() to ensure that the manager object refers to a started manager process.

The arguments to the constructor are as follows:

address

The address on which the manager process listens for new connections. If address is None then an arbitrary one is chosen.

See Listener objects.

authkey

The authentication key which will be used to check the validity of incoming connections to the server process.

If authkey is None then currentProcess().getAuthKey() is used. Otherwise authkey is used and it must be a string.

See Authentication keys.

start()
Spawn or fork a subprocess to start the manager.
serveForever()
Start the manager in the current process. See Using a remote manager.
fromAddress(address, authkey)
A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See Using a remote manager.
shutdown()

Stop the process used by the manager. This is only available if start() has been used to start the server process.

This can be called multiple times.

BaseManager instances also have one read-only property:

address
The address used by the manager.

The creation of managers which support arbitrary types is discussed below in Customized managers.

SyncManager

SyncManager is a subclass of BaseManager which can be used for the synchronization of processes. Objects of this type are returned by processing.Manager().

It also supports creation of shared lists and dictionaries. The instance methods defined by SyncManager are

BoundedSemaphore(value=1)
Creates a shared threading.BoundedSemaphore object and returns a proxy for it.
Condition(lock=None)

Creates a shared threading.Condition object and returns a proxy for it.

If lock is supplied then it should be a proxy for a threading.Lock or threading.RLock object.

Event()
Creates a shared threading.Event object and returns a proxy for it.
Lock()
Creates a shared threading.Lock object and returns a proxy for it.
Namespace()

Creates a shared Namespace object and returns a proxy for it.

See Namespace objects.

Queue(maxsize=0)
Creates a shared Queue.Queue object and returns a proxy for it.
RLock()
Creates a shared threading.RLock object and returns a proxy for it.
Semaphore(value=1)
Creates a shared threading.Semaphore object and returns a proxy for it.
Array(typecode, sequence)
Creates an array and returns a proxy for it. (format is ignored.)
Value(typecode, value)
Creates an object with a writable value attribute and returns a proxy for it.
dict(), dict(mapping), dict(sequence)
Creates a shared dict object and returns a proxy for it.
list(), list(sequence)
Creates a shared list object and returns a proxy for it.
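
For instance, a manager returned by processing.Manager() could be used as follows (a minimal sketch using only the methods listed above):

from processing import Manager

if __name__ == '__main__':
    manager = Manager()

    queue = manager.Queue()       # proxy for a Queue.Queue held in the manager process
    lock = manager.Lock()         # proxy for a threading.Lock
    d = manager.dict()            # proxy for a shared dict

    lock.acquire()
    try:
        queue.put('hello')
        d['answer'] = 42
    finally:
        lock.release()

    print queue.get()             # prints 'hello'
    print d['answer']             # prints 42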

Namespace objects

A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes.

However, when using a proxy for a namespace object, an attribute beginning with '_' will be an attribute of the proxy and not an attribute of the referent:

>>> manager = processing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3    # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')

Customized managers

To create one's own manager one creates a subclass of BaseManager.

To create a method of the subclass which will create new shared objects one uses the following function:

CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)

Returns a function with signature func(self, *args, **kwds) which will create a shared object using the manager self and return a proxy for it.

The shared objects will be created by evaluating callable(*args, **kwds) in the manager process.

The arguments are:

callable
The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored.
proxytype

The type of proxy which will be used for objects returned by callable.

If proxytype is None then each time an object is returned by callable either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the exposed argument, see below.

exposed

Given a shared object returned by callable, the exposed argument is the list of those method names which should be exposed via BaseProxy._callMethod(). [1] [2]

If exposed is None and callable.__exposed__ exists then callable.__exposed__ is used instead.

If exposed is None and callable.__exposed__ does not exist then all methods of the shared object which do not start with '_' will be exposed.

An attempt to use BaseProxy._callMethod() with a method name which is not exposed will raise an exception.

typeid
If typeid is a string then it is used as an identifier for the callable. Otherwise, typeid must be None and a string prefixed by callable.__name__ is used as the identifier.
[1] A method here means any attribute which has a __call__ attribute.
[2]

The method names __repr__, __str__, and __cmp__ of a shared object are always exposed by the manager. However, instead of invoking the __repr__(), __str__(), __cmp__() instance methods (none of which are guaranteed to exist) they invoke the builtin functions repr(), str() and cmp().

Note that one should generally avoid exposing rich comparison methods like __eq__(), __ne__(), __le__(). To make the proxy type support comparison by value one can just expose __cmp__() instead (even if the referent does not have such a method).

Example

from processing.managers import BaseManager, CreatorMethod

class FooClass(object):
    def bar(self):
        print 'BAR'
    def baz(self):
        print 'BAZ'

class NewManager(BaseManager):
    Foo = CreatorMethod(FooClass)

if __name__ == '__main__':
    manager = NewManager()
    manager.start()
    foo = manager.Foo()
    foo.bar()               # prints 'BAR'
    foo.baz()               # prints 'BAZ'
    manager.shutdown()

See ex_newtype.py for more examples.
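
A further sketch (Counter and CounterManager are illustrative names) showing how the exposed and typeid arguments can restrict which methods a proxy makes available:

from processing.managers import BaseManager, CreatorMethod

class Counter(object):
    def __init__(self):
        self._value = 0
    def increment(self):
        self._value += 1
    def value(self):
        return self._value

class CounterManager(BaseManager):
    # only 'increment' and 'value' will be callable through the proxy
    Counter = CreatorMethod(Counter, exposed=['increment', 'value'],
                            typeid='Counter')

if __name__ == '__main__':
    manager = CounterManager()
    manager.start()
    c = manager.Counter()
    c.increment()
    assert c.value() == 1
    manager.shutdown()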

Using a remote manager

It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it).

Running the following commands creates a server for a shared queue which remote clients can use:

>>> from processing.managers import BaseManager, CreatorMethod
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy')
...
>>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none')
>>> m.serveForever()

One client can access the server as follows:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.put('hello')

Another client can also use it:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.get()
'hello'
uqfoundation-multiprocess-b3457a5/py3.12/doc/manager-objects.txt000066400000000000000000000235161455552142400246320ustar00rootroot00000000000000.. include:: header.txt ================= Manager objects ================= A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the `processing.managers` module. BaseManager =========== `BaseManager` is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects. The public methods of `BaseManager` are the following: `__init__(self, address=None, authkey=None)` Creates a manager object. Once created one should call `start()` or `serveForever()` to ensure that the manager object refers to a started manager process. The arguments to the constructor are as follows: `address` The address on which the manager process listens for new connections. If `address` is `None` then an arbitrary one is chosen. See `Listener objects `_. `authkey` The authentication key which will be used to check the validity of incoming connections to the server process. If `authkey` is `None` then `currentProcess().getAuthKey()`. Otherwise `authkey` is used and it must be a string. See `Authentication keys `_. `start()` Spawn or fork a subprocess to start the manager. `serveForever()` Start the manager in the current process. See `Using a remote manager`_. `fromAddress(address, authkey)` A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See `Using a remote manager`_. `shutdown()` Stop the process used by the manager. This is only available if `start()` has been used to start the server process. This can be called multiple times. `BaseManager` instances also have one read-only property: `address` The address used by the manager. The creation of managers which support arbitrary types is discussed below in `Customized managers`_. SyncManager =========== `SyncManager` is a subclass of `BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by `processing.Manager()`. It also supports creation of shared lists and dictionaries. The instance methods defined by `SyncManager` are `BoundedSemaphore(value=1)` Creates a shared `threading.BoundedSemaphore` object and returns a proxy for it. `Condition(lock=None)` Creates a shared `threading.Condition` object and returns a proxy for it. If `lock` is supplied then it should be a proxy for a `threading.Lock` or `threading.RLock` object. `Event()` Creates a shared `threading.Event` object and returns a proxy for it. `Lock()` Creates a shared `threading.Lock` object and returns a proxy for it. `Namespace()` Creates a shared `Namespace` object and returns a proxy for it. See `Namespace objects`_. `Queue(maxsize=0)` Creates a shared `Queue.Queue` object and returns a proxy for it. `RLock()` Creates a shared `threading.RLock` object and returns a proxy for it. `Semaphore(value=1)` Creates a shared `threading.Semaphore` object and returns a proxy for it. `Array(typecode, sequence)` Create an array and returns a proxy for it. (`format` is ignored.) `Value(typecode, value)` Create an object with a writable `value` attribute and returns a proxy for it. 
`dict()`, `dict(mapping)`, `dict(sequence)` Creates a shared `dict` object and returns a proxy for it. `list()`, `list(sequence)` Creates a shared `list` object and returns a proxy for it. Namespace objects ----------------- A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with `'_'` will be an attribute of the proxy and not an attribute of the referent:: >>> manager = processing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print Global Namespace(x=10, y='hello') Customized managers =================== To create one's own manager one creates a subclass of `BaseManager`. To create a method of the subclass which will create new shared objects one uses the following function: `CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)` Returns a function with signature `func(self, *args, **kwds)` which will create a shared object using the manager `self` and return a proxy for it. The shared objects will be created by evaluating `callable(*args, **kwds)` in the manager process. The arguments are: `callable` The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored. `proxytype` The type of proxy which will be used for object returned by `callable`. If `proxytype` is `None` then each time an object is returned by `callable` either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the `exposed` argument, see below. `exposed` Given a shared object returned by `callable`, the `exposed` argument is the list of those method names which should be exposed via |callmethod|_. [#]_ [#]_ If `exposed` is `None` and `callable.__exposed__` exists then `callable.__exposed__` is used instead. If `exposed` is `None` and `callable.__exposed__` does not exist then all methods of the shared object which do not start with `'_'` will be exposed. An attempt to use |callmethod| with a method name which is not exposed will raise an exception. `typeid` If `typeid` is a string then it is used as an identifier for the callable. Otherwise, `typeid` must be `None` and a string prefixed by `callable.__name__` is used as the identifier. .. |callmethod| replace:: ``BaseProxy._callMethod()`` .. _callmethod: proxy-objects.html#methods-of-baseproxy .. [#] A method here means any attribute which has a `__call__` attribute. .. [#] The method names `__repr__`, `__str__`, and `__cmp__` of a shared object are always exposed by the manager. However, instead of invoking the `__repr__()`, `__str__()`, `__cmp__()` instance methods (none of which are guaranteed to exist) they invoke the builtin functions `repr()`, `str()` and `cmp()`. Note that one should generally avoid exposing rich comparison methods like `__eq__()`, `__ne__()`, `__le__()`. To make the proxy type support comparison by value one can just expose `__cmp__()` instead (even if the referent does not have such a method). 
Example ------- :: from processing.managers import BaseManager, CreatorMethod class FooClass(object): def bar(self): print 'BAR' def baz(self): print 'BAZ' class NewManager(BaseManager): Foo = CreatorMethod(FooClass) if __name__ == '__main__': manager = NewManager() manager.start() foo = manager.Foo() foo.bar() # prints 'BAR' foo.baz() # prints 'BAZ' manager.shutdown() See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples. Using a remote manager ====================== It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a shared queue which remote clients can use:: >>> from processing.managers import BaseManager, CreatorMethod >>> import Queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy') ... >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none') >>> m.serveForever() One client can access the server as follows:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.put('hello') Another client can also use it:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.get() 'hello' .. _Prev: connection-objects.html .. _Up: processing-ref.html .. _Next: proxy-objects.html uqfoundation-multiprocess-b3457a5/py3.12/doc/pool-objects.html000066400000000000000000000265511455552142400243200ustar00rootroot00000000000000 Process Pools
Prev         Up         Next

Process Pools

The processing.pool module has one public class:

class Pool(processes=None, initializer=None, initargs=())

A class representing a pool of worker processes.

Tasks can be offloaded to the pool and the results dealt with when they become available.

Note that tasks can only be submitted (or retrieved) by the process which created the pool object.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

Pool objects

Pool has the following public methods:

__init__(processes=None)
The constructor creates and starts processes worker processes. If processes is None then cpuCount() is used to find a default, or 1 if cpuCount() raises NotImplementedError.
apply(func, args=(), kwds={})
Equivalent of the apply() builtin function. It blocks till the result is ready.
applyAsync(func, args=(), kwds={}, callback=None)

A variant of the apply() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

map(func, iterable, chunksize=None)

A parallel equivalent of the map() builtin function. It blocks till the result is ready.

This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.

mapAsync(func, iterable, chunksize=None, callback=None)

A variant of the map() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

imap(func, iterable, chunksize=1)

An equivalent of itertools.imap().

The chunksize argument is the same as the one used by the map() method. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1.

Also if chunksize is 1 then the next() method of the iterator returned by the imap() method has an optional timeout parameter: next(timeout) will raise processing.TimeoutError if the result cannot be returned within timeout seconds.

imapUnordered(func, iterable, chunksize=1)
The same as imap() except that the ordering of the results from the returned iterator should be considered arbitrary. (The order is only guaranteed to be "correct" when there is a single worker process.)
close()
Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
terminate()
Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected terminate() will be called immediately.
join()
Wait for the worker processes to exit. One must call close() or terminate() before using join().
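
A short sketch (cube and collect are illustrative names) of submitting work with applyAsync() and a callback:

from processing import Pool

def cube(x):
    return x**3

results = []

def collect(value):
    # runs in the thread which handles results, so it must return quickly
    results.append(value)

if __name__ == '__main__':
    pool = Pool(processes=2)
    for i in range(5):
        pool.applyAsync(cube, (i,), callback=collect)
    pool.close()                # no more tasks will be submitted
    pool.join()                 # wait for the worker processes to exit
    assert sorted(results) == [0, 1, 8, 27, 64]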

Asynchronous result objects

The result objects returned by applyAsync() and mapAsync() have the following public methods:

get(timeout=None)
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then processing.TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
wait(timeout=None)
Waits until the result is available or until timeout seconds pass.
ready()
Returns whether the call has completed.
successful()
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.

Examples

The following example demonstrates the use of a pool:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes

    result = pool.applyAsync(f, (10,))    # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow

    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

    it = pool.imap(f, range(10))
    print it.next()                       # prints "0"
    print it.next()                       # prints "1"
    print it.next(timeout=1)              # prints "4" unless your computer is *very* slow

    import time
    result = pool.applyAsync(time.sleep, (10,))
    print result.get(timeout=1)           # raises `TimeoutError`

See also ex_pool.py.

uqfoundation-multiprocess-b3457a5/py3.12/doc/pool-objects.txt000066400000000000000000000136411455552142400241670ustar00rootroot00000000000000.. include:: header.txt =============== Process Pools =============== The `processing.pool` module has one public class: **class** `Pool(processes=None, initializer=None, initargs=())` A class representing a pool of worker processes. Tasks can be offloaded to the pool and the results dealt with when they become available. Note that tasks can only be submitted (or retrieved) by the process which created the pool object. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. Pool objects ============ `Pool` has the following public methods: `__init__(processes=None)` The constructor creates and starts `processes` worker processes. If `processes` is `None` then `cpuCount()` is used to find a default or 1 if `cpuCount()` raises `NotImplemented`. `apply(func, args=(), kwds={})` Equivalent of the `apply()` builtin function. It blocks till the result is ready. `applyAsync(func, args=(), kwds={}, callback=None)` A variant of the `apply()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `map(func, iterable, chunksize=None)` A parallel equivalent of the `map()` builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting `chunksize` to a positive integer. `mapAsync(func, iterable, chunksize=None, callback=None)` A variant of the `map()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `imap(func, iterable, chunksize=1)` An equivalent of `itertools.imap()`. The `chunksize` argument is the same as the one used by the `map()` method. For very long iterables using a large value for `chunksize` can make make the job complete **much** faster than using the default value of `1`. Also if `chunksize` is `1` then the `next()` method of the iterator returned by the `imap()` method has an optional `timeout` parameter: `next(timeout)` will raise `processing.TimeoutError` if the result cannot be returned within `timeout` seconds. `imapUnordered(func, iterable, chunksize=1)` The same as `imap()` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) `close()` Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. `terminate()` Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected `terminate()` will be called immediately. 
`join()` Wait for the worker processes to exit. One must call `close()` or `terminate()` before using `join()`. Asynchronous result objects =========================== The result objects returns by `applyAsync()` and `mapAsync()` have the following public methods: `get(timeout=None)` Returns the result when it arrives. If `timeout` is not `None` and the result does not arrive within `timeout` seconds then `processing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by `get()`. `wait(timeout=None)` Waits until the result is available or until `timeout` seconds pass. `ready()` Returns whether the call has completed. `successful()` Returns whether the call completed without raising an exception. Will raise `AssertionError` if the result is not ready. Examples ======== The following example demonstrates the use of a pool:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, (10,)) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print it.next() # prints "0" print it.next() # prints "1" print it.next(timeout=1) # prints "4" unless your computer is *very* slow import time result = pool.applyAsync(time.sleep, (10,)) print result.get(timeout=1) # raises `TimeoutError` See also `ex_pool.py <../examples/ex_pool.py>`_. .. _Prev: proxy-objects.html .. _Up: processing-ref.html .. _Next: sharedctypes.html uqfoundation-multiprocess-b3457a5/py3.12/doc/process-objects.html000066400000000000000000000235741455552142400250270ustar00rootroot00000000000000 Process objects
Prev         Up         Next

Process objects

Process objects represent activity that is run in a separate process.

Process

The Process class has equivalents of all the methods of threading.Thread:

__init__(group=None, target=None, name=None, args=(), kwargs={})

This constructor should always be called with keyword arguments. Arguments are:

group
should be None; exists for compatibility with threading.Thread.
target
is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name
is the process name. By default, a unique name is constructed of the form 'Process-N1:N2:...:Nk' where N1,N2,...,Nk is a sequence of integers whose length is determined by the generation of the process.
args
is the argument tuple for the target invocation. Defaults to ().
kwargs
is a dictionary of keyword arguments for the target invocation. Defaults to {}.

If a subclass overrides the constructor, it must make sure it invokes the base class constructor (Process.__init__()) before doing anything else to the process.

run()

Method representing the process's activity.

You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.

start()

Start the process's activity.

This must be called at most once per process object. It arranges for the object's run() method to be invoked in a separate process.

join(timeout=None)

This blocks the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.

If timeout is None then there is no timeout.

A process can be joined many times.

A process cannot join itself because this would cause a deadlock.

It is an error to attempt to join a process before it has been started.

getName()
Return the process's name.
setName(name)

Set the process's name.

The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor.

isAlive()

Return whether the process is alive.

Roughly, a process object is alive from the moment the start() method returns until the child process terminates.

isDaemon()
Return the process's daemon flag.
setDaemon(daemonic)

Set the process's daemon flag to the Boolean value daemonic. This must be called before start() is called.

The initial value is inherited from the creating process.

When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes.

In addition process objects also support the following methods.

getPid()
Return the process ID. Before the process is spawned this will be None.
getExitCode()
Return the child's exit code. This will be None if the process has not yet terminated. A negative value -N indicates that the child was terminated by signal N.
getAuthKey()

Return the process's authentication key (a string).

When the processing package is initialized the main process is assigned a random hexadecimal string.

When a Process object is created it will inherit the authentication key of its parent process, although this may be changed using setAuthKey() below.

See Authentication Keys.

setAuthKey(authkey)
Set the process's authentication key which must be a string.
terminate()

Terminate the process. On Unix this is done using the SIGTERM signal and on Windows TerminateProcess() is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will not be terminated.

Warning

If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock.

Note that the start(), join(), isAlive() and getExitCode() methods should only be called by the process that created the process object.

Example

Example usage of some of the methods of Process:

>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
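
A further sketch (Greeter is an illustrative name) of subclassing Process and overriding run(); note that the subclass constructor invokes Process.__init__() before anything else:

import processing

class Greeter(processing.Process):
    def __init__(self, greeting):
        processing.Process.__init__(self)
        self.greeting = greeting
    def run(self):
        print self.greeting, 'from', self.getName()

if __name__ == '__main__':
    p = Greeter('hello')
    p.start()
    p.join()
    assert p.getExitCode() == 0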
uqfoundation-multiprocess-b3457a5/py3.12/doc/process-objects.txt000066400000000000000000000136131455552142400246730ustar00rootroot00000000000000.. include:: header.txt ================= Process objects ================= Process objects represent activity that is run in a separate process. Process ======= The `Process` class has equivalents of all the methods of `threading.Thread`: `__init__(group=None, target=None, name=None, args=(), kwargs={})` This constructor should always be called with keyword arguments. Arguments are: `group` should be `None`; exists for compatibility with `threading.Thread`. `target` is the callable object to be invoked by the `run()` method. Defaults to None, meaning nothing is called. `name` is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. `args` is the argument tuple for the target invocation. Defaults to `()`. `kwargs` is a dictionary of keyword arguments for the target invocation. Defaults to `{}`. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (`Process.__init__()`) before doing anything else to the process. `run()` Method representing the process's activity. You may override this method in a subclass. The standard `run()` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the `args` and `kwargs` arguments, respectively. `start()` Start the process's activity. This must be called at most once per process object. It arranges for the object's `run()` method to be invoked in a separate process. `join(timeout=None)` This blocks the calling thread until the process whose `join()` method is called terminates or until the optional timeout occurs. If `timeout` is `None` then there is no timeout. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. `getName()` Return the process's name. `setName(name)` Set the process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. `isAlive()` Return whether the process is alive. Roughly, a process object is alive from the moment the `start()` method returns until the child process terminates. `isDaemon()` Return the process's daemon flag. `setDaemon(daemonic)` Set the process's daemon flag to the Boolean value `daemonic`. This must be called before `start()` is called. The initial value is inherited from the creating process. When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes. In addition process objects also support the following methods. `getPid()` Return the process ID. Before the process is spawned this will be `None`. `getExitCode()` Return the child's exit code. This will be `None` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. `getAuthKey()` Return the process's authentication key (a string). When the `processing` package is initialized the main process is assigned a random hexadecimal string. 
When a `Process` object is created it will inherit the authentication key of its parent process, although this may be changed using `setAuthKey()` below. See `Authentication Keys `_. `setAuthKey(authkey)` Set the process's authentication key which must be a string. `terminate()` Terminate the process. On Unix this is done using the `SIGTERM` signal and on Windows `TerminateProcess()` is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will *not* be terminates. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the `start()`, `join()`, `isAlive()` and `getExitCode()` methods should only be called by the process that created the process object. Example ======= Example usage of some of the methods of `Process`:: >>> import processing, time, signal >>> p = processing.Process(target=time.sleep, args=(1000,)) >>> print p, p.isAlive() False >>> p.start() >>> print p, p.isAlive() True >>> p.terminate() >>> print p, p.isAlive() False >>> p.getExitCode() == -signal.SIGTERM True .. _Prev: processing-ref.html .. _Up: processing-ref.html .. _Next: queue-objects.html uqfoundation-multiprocess-b3457a5/py3.12/doc/processing-ref.html000066400000000000000000000573611455552142400246510ustar00rootroot00000000000000 processing package reference
Prev         Up         Next

processing package reference

The processing package mostly replicates the API of the threading module.

Classes and exceptions

class Process(group=None, target=None, name=None, args=(), kwargs={})

An analogue of threading.Thread.

See Process objects.

exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.
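
A small sketch of catching BufferTooShort, assuming the sendBytes() and recvBytesInto() connection methods described under Connection objects:

from array import array
from processing import Pipe, BufferTooShort

if __name__ == '__main__':
    conn1, conn2 = Pipe()
    conn1.sendBytes('x' * 100)
    buffer = array('c', ' ' * 10)         # deliberately too small
    try:
        conn2.recvBytesInto(buffer)
    except BufferTooShort, e:
        message = e.args[0]               # the complete message as a byte string
        assert len(message) == 100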

Pipes and Queues

When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks.

For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers).

Note that one can also create a shared queue by using a manager object -- see Managers.

For an example of the usage of queues for interprocess communication see ex_workers.py.

Pipe(duplex=True)

Returns a pair (conn1, conn2) of connection objects representing the ends of a pipe.

If duplex is true then the pipe is two way; otherwise conn1 can only be used for receiving messages and conn2 can only be used for sending messages.

See Connection objects.

Queue(maxsize=0)

Returns a process shared queue object. The usual Empty and Full exceptions from the standard library's Queue module are raised to signal timeouts.

See Queue objects.
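
A brief sketch of both mechanisms (the worker function names are illustrative):

from processing import Process, Pipe, Queue

def pipe_worker(conn):
    conn.send(['hello', 42])
    conn.close()

def queue_worker(q):
    q.put('done')

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=pipe_worker, args=(child_conn,))
    p.start()
    assert parent_conn.recv() == ['hello', 42]
    p.join()

    q = Queue()
    p = Process(target=queue_worker, args=(q,))
    p.start()
    assert q.get() == 'done'
    p.join()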

Synchronization primitives

Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the standard library's threading module.

Note that one can also create synchronization primitives by using a manager object -- see Managers.

BoundedSemaphore(value=1)

Returns a bounded semaphore object: a clone of threading.BoundedSemaphore.

(On Mac OSX this is indistinguishable from Semaphore() because sem_getvalue() is not implemented on that platform).

Condition(lock=None)

Returns a condition variable: a clone of threading.Condition.

If lock is specified then it should be a Lock or RLock object from processing.

Event()
Returns an event object: a clone of threading.Event.
Lock()
Returns a non-recursive lock object: a clone of threading.Lock.
RLock()
Returns a recursive lock object: a clone of threading.RLock.
Semaphore(value=1)
Returns a semaphore object: a clone of threading.Semaphore.

Acquiring with a timeout

The acquire() method of BoundedSemaphore, Lock, RLock and Semaphore has a timeout parameter not supported by the equivalents in threading. The signature is acquire(block=True, timeout=None) with keyword parameters being acceptable. If block is true and timeout is not None then it specifies a timeout in seconds. If block is false then timeout is ignored.
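
For example, a sketch of a timed acquire (the return value is assumed to be False when the timeout expires, as with threading):

from processing import Lock

if __name__ == '__main__':
    lock = Lock()
    lock.acquire()                                   # the lock is now held
    # a second acquire cannot succeed, so it times out after about a second
    acquired = lock.acquire(block=True, timeout=1.0)
    assert acquired == False
    lock.release()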

Interrupting the main thread

If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to BoundedSemaphore.acquire(), Lock.acquire(), RLock.acquire(), Semaphore.acquire(), Condition.acquire() or Condition.wait() then the call will be immediately interrupted and KeyboardInterrupt will be raised.

This differs from the behaviour of threading where SIGINT will be ignored while the equivalent blocking calls are in progress.

Shared Objects

It is possible to create shared objects using shared memory which can be inherited by child processes.

Value(typecode_or_type, *args, **, lock=True)

Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Array(typecode_or_type, size_or_initializer, **, lock=True)

Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library.

See also sharedctypes.
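
A short sketch of sharing a counter and an array with a child process (update is an illustrative name):

from processing import Process, Value, Array

def update(counter, numbers):
    counter.value = 100                      # synchronized wrapper is created by default
    for i in range(len(numbers)):
        numbers[i] = numbers[i] * 2

if __name__ == '__main__':
    counter = Value('i', 0)                  # shared int ('i' is an array typecode)
    numbers = Array('d', [0.5, 1.5, 2.5])    # shared array of doubles
    p = Process(target=update, args=(counter, numbers))
    p.start()
    p.join()
    assert counter.value == 100
    assert numbers[0] == 1.0 and numbers[2] == 5.0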

Managers

Managers provide a way to create data which can be shared between different processes.

Manager()

Returns a started SyncManager object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies.

The methods for creating shared objects are

list(), dict(), Namespace(), Value(), Array(), Lock(), RLock(), Semaphore(), BoundedSemaphore(), Condition(), Event(), Queue().

See SyncManager.

It is possible to create managers which support other types -- see Customized managers.

Process Pools

One can create a pool of processes which will carry out tasks submitted to it.

Pool(processes=None, initializer=None, initargs=())

Returns a process pool object which controls a pool of worker processes to which jobs can be submitted.

It supports asynchronous results with timeouts and callbacks and has a parallel map implementation.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

See Pool objects.

Logging

Some support for logging is available. Note, however, that the logging package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up.

enableLogging(level, HandlerType=None, handlerArgs=(), format=None)

Enables logging and sets the debug level used by the package's logger to level. See documentation for the logging module in the standard library.

If HandlerType is specified then a handler is created using HandlerType(*handlerArgs) and this will be used by the logger -- any previous handlers will be discarded. If format is specified then this will be used for the handler; otherwise format defaults to '[%(levelname)s/%(processName)s] %(message)s'. (The logger used by processing allows use of the non-standard '%(processName)s' format.)

If HandlerType is not specified and the logger has no handlers then a default one is created which prints to sys.stderr.

Note: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call enableLogging() with the same arguments which were used when its parent process last called enableLogging() (if it ever did).

getLogger()
Returns the logger used by processing. If enableLogging() has not yet been called then None is returned.

Below is an example session with logging turned on:

>>> import processing, logging
>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] manager exiting with exitcode 0

Miscellaneous

activeChildren()

Return list of all live children of the current process.

Calling this has the side effect of "joining" any processes which have already finished.

cpuCount()
Returns the number of CPUs in the system. May raise NotImplementedError.
currentProcess()

An analogue of threading.current_thread().

Returns the object corresponding to the current process.

freezeSupport()

Adds support for when a program which uses the processing package has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example

from processing import Process, freezeSupport

def f():
    print 'hello world!'

if __name__ == '__main__':
    freezeSupport()
    Process(target=f).start()

If the freezeSupport() line is missed out then trying to run the frozen executable will raise RuntimeError.

If the module is being run normally by the python interpreter then freezeSupport() has no effect.

Note

  • The processing.dummy package replicates the API of processing but is no more than a wrapper around the threading module.
  • processing contains no analogues of activeCount, enumerate, settrace, setprofile, Timer, or local from the threading module.
uqfoundation-multiprocess-b3457a5/py3.12/doc/processing-ref.txt000066400000000000000000000310141455552142400245070ustar00rootroot00000000000000.. include:: header.txt ============================== processing package reference ============================== The `processing` package mostly replicates the API of the `threading` module. Classes and exceptions ---------------------- **class** `Process(group=None, target=None, name=None, args=(), kwargs={})` An analogue of `threading.Thread`. See `Process objects`_. **exception** `BufferTooShort` Exception raised by the `recvBytesInto()` method of a `connection object `_ when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Pipes and Queues ---------------- When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers). Note that one can also create a shared queue by using a manager object -- see `Managers`_. For an example of the usage of queues for interprocess communication see `ex_workers.py <../examples/ex_workers.py>`_. `Pipe(duplex=True)` Returns a pair `(conn1, conn2)` of connection objects representing the ends of a pipe. If `duplex` is true then the pipe is two way; otherwise `conn1` can only be used for receiving messages and `conn2` can only be used for sending messages. See `Connection objects `_. `Queue(maxsize=0)` Returns a process shared queue object. The usual `Empty` and `Full` exceptions from the standard library's `Queue` module are raised to signal timeouts. See `Queue objects `_. Synchronization primitives -------------------------- Generally synchronization primitives are not as necessary in a multiprocess program as they are in a mulithreaded program. See the documentation for the standard library's `threading` module. Note that one can also create synchronization primitves by using a manager object -- see `Managers`_. `BoundedSemaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.BoundedSemaphore`. (On Mac OSX this is indistiguishable from `Semaphore()` because `sem_getvalue()` is not implemented on that platform). `Condition(lock=None)` Returns a condition variable: a clone of `threading.Condition`. If `lock` is specified then it should be a `Lock` or `RLock` object from `processing`. `Event()` Returns an event object: a clone of `threading.Event`. `Lock()` Returns a non-recursive lock object: a clone of `threading.Lock`. `RLock()` Returns a recursive lock object: a clone of `threading.RLock`. `Semaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.Semaphore`. .. admonition:: Acquiring with a timeout The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and `Semaphore` has a timeout parameter not supported by the equivalents in `threading`. The signature is `acquire(block=True, timeout=None)` with keyword parameters being acceptable. If `block` is true and `timeout` is not `None` then it specifies a timeout in seconds. If `block` is false then `timeout` is ignored. .. 
admonition:: Interrupting the main thread If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to `BoundedSemaphore.acquire()`, `Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()`, `Condition.acquire()` or `Condition.wait()` then the call will be immediately interrupted and `KeyboardInterrupt` will be raised. This differs from the behaviour of `threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared Objects -------------- It is possible to create shared objects using shared memory which can be inherited by child processes. `Value(typecode_or_type, *args, **, lock=True)` Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Array(typecode_or_type, size_or_initializer, **, lock=True)` Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library. See also `sharedctypes `_. Managers -------- Managers provide a way to create data which can be shared between different processes. `Manager()` Returns a started `SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. The methods for creating shared objects are `list()`, `dict()`, `Namespace()`, `Value()`, `Array()`, `Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`. See `SyncManager `_. It is possible to create managers which support other types -- see `Customized managers `_. Process Pools ------------- One can create a pool of processes which will carry out tasks submitted to it. 
`Pool(processes=None, initializer=None, initargs=())` Returns a process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. See `Pool objects `_. Logging ------- Some support for logging is available. Note, however, that the `logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. `enableLogging(level, HandlerType=None, handlerArgs=(), format=None)` Enables logging and sets the debug level used by the package's logger to `level`. See documentation for the `logging` module in the standard library. If `HandlerType` is specified then a handler is created using `HandlerType(*handlerArgs)` and this will be used by the logger -- any previous handlers will be discarded. If `format` is specified then this will be used for the handler; otherwise `format` defaults to `'[%(levelname)s/%(processName)s] %(message)s'`. (The logger used by `processing` allows use of the non-standard `'%(processName)s'` format.) If `HandlerType` is not specified and the logger has no handlers then a default one is created which prints to `sys.stderr`. *Note*: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call `enableLogging()` with the same arguments which were used when its parent process last called `enableLogging()` (if it ever did). `getLogger()` Returns the logger used by `processing`. If `enableLogging()` has not yet been called then `None` is returned. Below is an example session with logging turned on:: >>> import processing, logging >>> processing.enableLogging(level=logging.INFO) >>> processing.getLogger().warning('doomed') [WARNING/MainProcess] doomed >>> m = processing.Manager() [INFO/SyncManager-1] child process calling self.run() [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-1] manager received shutdown message [INFO/SyncManager-1] manager exiting with exitcode 0 Miscellaneous ------------- `activeChildren()` Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. `cpuCount()` Returns the number of CPUs in the system. May raise `NotImplementedError`. `currentProcess()` An analogue of `threading.current_thread()`. Returns the object corresponding to the current process. `freezeSupport()` Adds support for when a program which uses the `processing` package has been frozen to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) One needs to call this function straight after the `if __name__ == '__main__'` line of the main module. For example :: from processing import Process, freezeSupport def f(): print 'hello world!' if __name__ == '__main__': freezeSupport() Process(target=f).start() If the `freezeSupport()` line is missed out then trying to run the frozen executable will raise `RuntimeError`. If the module is being run normally by the python interpreter then `freezeSupport()` has no effect. .. 
note:: * The `processing.dummy` package replicates the API of `processing` but is no more than a wrapper around the `threading` module. * `processing` contains no analogues of `activeCount`, `enumerate`, `settrace`, `setprofile`, `Timer`, or `local` from the `threading` module. Subsections ----------- + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes object `_ + `Listeners and Clients `_ .. _Prev: intro.html .. _Up: index.html .. _Next: process-objects.html uqfoundation-multiprocess-b3457a5/py3.12/doc/programming-guidelines.html000066400000000000000000000214551455552142400263660ustar00rootroot00000000000000 Programming guidelines
Prev         Up         Next

Programming guidelines

There are certain guidelines and idioms which should be adhered to when using the processing package.

All platforms

Avoid shared state

As far as possible one should try to avoid shifting large amounts of data between processes.

It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the threading module.

Picklability:
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies:

Do not use a proxy object from more than one thread unless you protect it with a lock.

(There is never a problem with different processes using the 'same' proxy.)

Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or activeChildren() is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's isAlive() will join the process. Even so it is probably good practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many of the types from the processing package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes

Using the terminate() method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes.

Therefore it is probably best to only consider using terminate() on processes which never use any shared resources.

Joining processes that use queues

Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the cancelJoin() method of the queue to avoid this behaviour.)

This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will automatically be joined.

An example which will deadlock is the following:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # this deadlocks
    obj = queue.get()

A fix here would be to swap the last two lines round (or simply remove the p.join() line).

Explicitly pass resources to child processes

On Unix a child process can make use of a shared resource created in a parent process by referring to it as a global variable. However, it is better to pass the object as an argument to the constructor for the child process.

Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process.

So for instance

from processing import Process, Lock

def f():
    ... do something using "lock" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f).start()

should be rewritten as

from processing import Process, Lock

def f(l):
    ... do something using "l" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f, args=(lock,)).start()

Windows

Since Windows lacks os.fork() it has a few extra restrictions:

More picklability:

Ensure that all arguments to Process.__init__() are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the target argument on Windows --- just define a function and use that instead.

Also, if you subclass Process then make sure that instances will be picklable when the start() method is called.
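
For instance, a minimal picklable subclass might look like the following (the Worker name is purely illustrative):

from processing import Process

class Worker(Process):
    def __init__(self, value):
        Process.__init__(self)
        self.value = value          # keep instance attributes picklable

    def run(self):
        print(self.value)

if __name__ == '__main__':
    w = Worker(42)
    w.start()
    w.join()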

Global variables:

Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that start() was called.

However, global variables which are just module level constants cause no problems.
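
A small illustration (the variable x is hypothetical; the exact behaviour depends on the platform and how the child process is started):

from processing import Process

x = 10                      # a module level constant causes no problems

def show():
    print(x)                # under Windows this may print 10, not 20

if __name__ == '__main__':
    x = 20                  # this rebinding is not re-run in a spawned child
    p = Process(target=show)
    p.start()
    p.join()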

Safe importing of main module:

Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).

For example, under Windows running the following module would fail with a RuntimeError:

from processing import Process

def foo():
    print 'hello'

p = Process(target=foo)
p.start()

Instead one should protect the "entry point" of the program by using if __name__ == '__main__': as follows:

from processing import Process

def foo():
    print 'hello'

if __name__ == '__main__':
    freezeSupport()
    p = Process(target=foo)
    p.start()

(The freezeSupport() line can be omitted if the program will be run normally instead of frozen.)

This allows the newly spawned Python interpreter to safely import the module and then run the module's foo() function.

Similar restrictions apply if a pool or manager is created in the main module.

uqfoundation-multiprocess-b3457a5/py3.12/doc/programming-guidelines.txt000066400000000000000000000150221455552142400262320ustar00rootroot00000000000000.. include:: header.txt ======================== Programming guidelines ======================== There are certain guidelines and idioms which should be adhered to when using the `processing` package. All platforms ------------- *Avoid shared state* As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the `threading` module. *Picklability*: Ensure that the arguments to the methods of proxies are picklable. *Thread safety of proxies*: Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the 'same' proxy.) *Joining zombie processes* On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or `activeChildren()` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's `isAlive()` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. *Better to inherit than pickle/unpickle* On Windows many of types from the `processing` package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which need access to a shared resource created elsewhere can inherit it from an ancestor process. *Avoid terminating processes* Using the `terminate()` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using `terminate()` on processes which never use any shared resources. *Joining processes that use queues* Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the `cancelJoin()` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from processing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the `p.join()` line). *Explicity pass resources to child processes* On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. 
Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from processing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from processing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Windows ------- Since Windows lacks `os.fork()` it has a few extra restrictions: *More picklability*: Ensure that all arguments to `Process.__init__()` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the `target` argument on Windows --- just define a function and use that instead. Also, if you subclass `Process` then make sure that instances will be picklable when the `start()` method is called. *Global variables*: Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that `start()` was called. However, global variables which are just module level constants cause no problems. *Safe importing of main module*: Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a `RuntimeError`:: from processing import Process def foo(): print 'hello' p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using `if __name__ == '__main__':` as follows:: from processing import Process def foo(): print 'hello' if __name__ == '__main__': freezeSupport() p = Process(target=foo) p.start() (The `freezeSupport()` line can be ommitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's `foo()` function. Similar restrictions apply if a pool or manager is created in the main module. .. _Prev: connection-ref.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.12/doc/proxy-objects.html000066400000000000000000000175771455552142400245400ustar00rootroot00000000000000 Proxy objects
Prev         Up         Next

Proxy objects

A proxy is an object which refers to a shared object which lives (presumably) in a different process. The shared object is said to be the referent of the proxy. Multiple proxy objects may have the same referent.

A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can:

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<Proxy[list] object at 0x00DFA230>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
>>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
True

Notice that applying str() to a proxy will return the representation of the referent, whereas applying repr() will return the representation of the proxy.

An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:

>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b)         # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']

Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the for statement:

>>> a = manager.dict([(i*i, i) for i in range(10)])
>>> for key in a:
...     print '<%r,%r>' % (key, a[key]),
...
<0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6>

Note

Although list and dict proxy objects are iterable, it will be much more efficient to iterate over a copy of the referent, for example

for item in some_list[:]:
    ...

and

for key in some_dict.keys():
    ...

Methods of BaseProxy

Proxy objects are instances of subclasses of BaseProxy. The only semi-public methods of BaseProxy are the following:

_callMethod(methodname, args=(), kwds={})

Call and return the result of a method of the proxy's referent.

If proxy is a proxy whose referent is obj then the expression

proxy._callMethod(methodname, args, kwds)

will evaluate the expression

getattr(obj, methodname)(*args, **kwds)         (*)

in the manager's process.

The returned value will be either a copy of the result of (*) or, if the result is an unpicklable iterator, a proxy for the iterator.

If an exception is raised by (*) then it is re-raised by _callMethod(). If some other exception is raised in the manager's process then this is converted into a RemoteError exception and is raised by _callMethod().

Note in particular that an exception will be raised if methodname has not been exposed --- see the exposed argument to CreatorMethod.

_getValue()

Return a copy of the referent.

If the referent is unpicklable then this will raise an exception.

__repr__
Return a representation of the proxy object.
__str__
Return the representation of the referent.

Cleanup

A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent.

A shared object gets deleted from the manager process when there are no longer any proxies referring to it.

Examples

An example of the usage of _callMethod():

>>> l = manager.list(range(10))
>>> l._callMethod('__getslice__', (2, 7))   # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._callMethod('__iter__')               # equiv to `iter(l)`
<Proxy[iter] object at 0x00DFAFF0>
>>> l._callMethod('__getitem__', (20,))     # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range

As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:

class IteratorProxy(BaseProxy):
    def __iter__(self):
        return self
    def next(self):
        return self._callMethod('next')
uqfoundation-multiprocess-b3457a5/py3.12/doc/proxy-objects.txt000066400000000000000000000115571455552142400244030ustar00rootroot00000000000000.. include:: header.txt =============== Proxy objects =============== A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that the its referent can:: >>> from processing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print l [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print repr(l) >>> l[4] 16 >>> l[2:5] [4, 9, 16] >>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] True Notice that applying `str()` to a proxy will return the representation of the referent, whereas applying `repr()` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of `a` now contains referent of `b` >>> print a, b [[]] [] >>> b.append('hello') >>> print a, b [['hello']] ['hello'] Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the `for` statement:: >>> a = manager.dict([(i*i, i) for i in range(10)]) >>> for key in a: ... print '<%r,%r>' % (key, a[key]), ... <0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6> .. note:: Although `list` and `dict` proxy objects are iterable, it will be much more efficient to iterate over a *copy* of the referent, for example :: for item in some_list[:]: ... and :: for key in some_dict.keys(): ... Methods of `BaseProxy` ====================== Proxy objects are instances of subclasses of `BaseProxy`. The only semi-public methods of `BaseProxy` are the following: `_callMethod(methodname, args=(), kwds={})` Call and return the result of a method of the proxy's referent. If `proxy` is a proxy whose referent is `obj` then the expression `proxy._callMethod(methodname, args, kwds)` will evaluate the expression `getattr(obj, methodname)(*args, **kwds)` |spaces| _`(*)` in the manager's process. The returned value will be either a copy of the result of `(*)`_ or if the result is an unpicklable iterator then a proxy for the iterator. If an exception is raised by `(*)`_ then then is re-raised by `_callMethod()`. If some other exception is raised in the manager's process then this is converted into a `RemoteError` exception and is raised by `_callMethod()`. Note in particular that an exception will be raised if `methodname` has not been *exposed* --- see the `exposed` argument to `CreatorMethod `_. `_getValue()` Return a copy of the referent. If the referent is unpicklable then this will raise an exception. `__repr__` Return a representation of the proxy object. `__str__` Return the representation of the referent. Cleanup ======= A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. 
A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Examples ======== An example of the usage of `_callMethod()`:: >>> l = manager.list(range(10)) >>> l._callMethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callMethod('__iter__') # equiv to `iter(l)` >>> l._callMethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:: class IteratorProxy(BaseProxy): def __iter__(self): return self def next(self): return self._callMethod('next') .. _Prev: manager-objects.html .. _Up: processing-ref.html .. _Next: pool-objects.html uqfoundation-multiprocess-b3457a5/py3.12/doc/queue-objects.html000066400000000000000000000227101455552142400244640ustar00rootroot00000000000000 Queue objects
Prev         Up         Next

Queue objects

The queue type provided by processing is a multi-producer, multi-consumer FIFO queue modelled on the Queue.Queue class in the standard library.

Queue(maxsize=0)

Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe.

processing.Queue implements all the methods of Queue.Queue except for qsize(), task_done() and join().

empty()
Return True if the queue is empty, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
full()
Return True if the queue is full, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
put(item, block=True, timeout=None)
Put item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).
put_nowait(item), putNoWait(item)
Equivalent to put(item, False).
get(block=True, timeout=None)
Remove and return an item from the queue. If optional args block is true and timeout is None (the default), block if necessary until an item is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the Empty exception (timeout is ignored in that case).
get_nowait(), getNoWait()
Equivalent to get(False).

processing.Queue has a few additional methods not found in Queue.Queue which are usually unnecessary:

putMany(iterable)
If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So q.putMany(X) is a faster alternative to for x in X: q.put(x). Raises an error if the queue has finite size.
close()
Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected.
joinThread()

This joins the background thread and can only be used after close() has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe.

By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call cancelJoin() to prevent this behaviour.

cancelJoin()
Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue.
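
As an illustration, a producer that flushes its queue explicitly before returning might look like the following sketch (calling close() by hand is normally optional since it also happens when the queue is garbage collected):

def producer(q):
    for i in range(10):
        q.put(i)
    q.close()               # no more data will be put on the queue by this process
    q.joinThread()          # block until the feeder thread has flushed the buffer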

Empty and Full

processing uses the usual Queue.Empty and Queue.Full exceptions to signal a timeout. They are not available in the processing namespace so you need to import them from Queue.
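
For example, a non-blocking consumer might be sketched as follows (using the Python 2 style import described above):

from Queue import Empty

def get_with_timeout(q):
    try:
        return q.get(timeout=0.5)   # raises Empty if nothing arrives within 0.5 seconds
    except Empty:
        return None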

Warning

If a process is killed using the terminate() method or os.kill() while it is trying to use a Queue then the data in the queue is likely to become corrupted. This may cause other processes to get an exception when they try to use the queue later on.

Warning

As mentioned above, if a child process has put items on a queue (and it has not used cancelJoin()) then that process will not terminate until all buffered items have been flushed to the pipe.

This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children.

Note that a queue created using a manager does not have this issue. See Programming Guidelines.

uqfoundation-multiprocess-b3457a5/py3.12/doc/queue-objects.txt000066400000000000000000000121211455552142400243320ustar00rootroot00000000000000.. include:: header.txt =============== Queue objects =============== The queue type provided by `processing` is a multi-producer, multi-consumer FIFO queue modelled on the `Queue.Queue` class in the standard library. `Queue(maxsize=0)` Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. `Queue.Queue` implements all the methods of `Queue.Queue` except for `qsize()`, `task_done()` and `join()`. `empty()` Return `True` if the queue is empty, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `full()` Return `True` if the queue is full, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `put(item, block=True, timeout=None)` Put item into the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Full` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the `Full` exception (`timeout` is ignored in that case). `put_nowait(item)`, `putNoWait(item)` Equivalent to `put(item, False)`. `get(block=True, timeout=None)` Remove and return an item from the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Empty` exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the `Empty` exception (`timeout` is ignored in that case). `get_nowait()`, `getNoWait()` Equivalent to `get(False)`. `processing.Queue` has a few additional methods not found in `Queue.Queue` which are usually unnecessary: `putMany(iterable)` If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So `q.putMany(X)` is a faster alternative to `for x in X: q.put(x)`. Raises an error if the queue has finite size. `close()` Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. `joinThread()` This joins the background thread and can only be used after `close()` has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call `cancelJoin()` to prevent this behaviour. `cancelJoin()` Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue. .. admonition:: `Empty` and `Full` `processing` uses the usual `Queue.Empty` and `Queue.Full` exceptions to signal a timeout. They are not available in the `processing` namespace so you need to import them from `Queue`. .. 
warning:: If a process is killed using the `terminate()` method or `os.kill()` while it is trying to use a `Queue` then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used `cancelJoin()`) then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all it non-daemonic children. Note that a queue created using a manager does not have this issue. See `Programming Guidelines `_. .. _Prev: process-objects.html .. _Up: processing-ref.html .. _Next: connection-objects.html uqfoundation-multiprocess-b3457a5/py3.12/doc/sharedctypes.html000066400000000000000000000241571455552142400244160ustar00rootroot00000000000000 Shared ctypes objects
Prev         Up         Next

Shared ctypes objects

The processing.sharedctypes module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the ctypes package.)

The functions in the module are

RawArray(typecode_or_type, size_or_initializer)

Returns a ctypes array allocated from shared memory.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock.

RawValue(typecode_or_type, *args)

Returns a ctypes object allocated from shared memory.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see documentation for ctypes.

Array(typecode_or_type, size_or_initializer, **, lock=True)

The same as RawArray() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Value(typecode_or_type, *args, **, lock=True)

The same as RawValue() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes object.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

copy(obj)
Returns a ctypes object allocated from shared memory which is a copy of the ctypes object obj.
synchronized(obj, lock=None)

Returns a process-safe wrapper object for a ctypes object which uses lock to synchronize access. If lock is None then a processing.RLock object is created automatically.

A synchronized wrapper will have two methods in addition to those of the object it wraps: getobj() returns the wrapped object and getlock() returns the lock object used for synchronization.

Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object.
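
For example, wrapping a raw value by hand might look like this sketch (the counter name is only illustrative):

from processing.sharedctypes import RawValue, synchronized

counter = synchronized(RawValue('i', 0))    # wrapped using an automatically created RLock

lock = counter.getlock()                    # the lock used for synchronization
lock.acquire()
try:
    counter.getobj().value += 1             # access the wrapped raw ctypes object
finally:
    lock.release()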

Equivalences

The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table MyStruct is some subclass of ctypes.Structure.)

ctypes sharedctypes using type sharedctypes using typecode
c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4)
MyStruct(4, 6) RawValue(MyStruct, 4, 6)  
(c_short * 7)() RawArray(c_short, 7) RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))

Example

Below is an example where a number of ctypes objects are modified by a child process

from processing import Process, Lock
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', 'hello world', lock=lock)
    A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print n.value
    print x.value
    print s.value
    print [(p.x, p.y) for p in A]

The results printed are

49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]

Avoid sharing pointers

Although it is possible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash.

uqfoundation-multiprocess-b3457a5/py3.12/doc/sharedctypes.txt000066400000000000000000000143071455552142400242650ustar00rootroot00000000000000.. include:: header.txt ======================== Shared ctypes objects ======================== The `processing.sharedctypes` module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the `ctypes` package.) The functions in the module are `RawArray(typecode_or_type, size_or_initializer)` Returns a ctypes array allocated from shared memory. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock. `RawValue(typecode_or_type, *args)` Returns a ctypes object allocated from shared memory. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see documentation for `ctypes`. `Array(typecode_or_type, size_or_initializer, **, lock=True)` The same as `RawArray()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Value(typecode_or_type, *args, **, lock=True)` The same as `RawValue()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `copy(obj)` Returns a ctypes object allocated from shared memory which is a copy of the ctypes object `obj`. `synchronized(obj, lock=None)` Returns a process-safe wrapper object for a ctypes object which uses `lock` to synchronize access. If `lock` is `None` then a `processing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: `getobj()` returns the wrapped object and `getlock()` returns the lock object used for synchronization. 
Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. Equivalences ============ The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table `MyStruct` is some subclass of `ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Example ======= Below is an example where a number of ctypes objects are modified by a child process :: from processing import Process, Lock from processing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for p in A: p.x **= 2 p.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(ctypes.c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print n.value print x.value print s.value print [(p.x, p.y) for p in A] The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. admonition:: Avoid sharing pointers Although it is posible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. _Prev: pool-objects.html .. _Up: processing-ref.html .. _Next: connection-ref.html uqfoundation-multiprocess-b3457a5/py3.12/doc/tests.html000066400000000000000000000060761455552142400230620ustar00rootroot00000000000000 Tests and Examples
Prev         Up         Next

Tests and Examples

processing contains a test sub-package of unit tests for the package. You can do a test run by doing

python -m processing.tests

on Python 2.5 or

python -c "from processing.tests import main; main()"

on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager.

The example sub-package contains the following modules:

ex_newtype.py
Demonstration of how to create and use customized managers and proxies.
ex_pool.py
Test of the Pool class which represents a process pool.
ex_synchronize.py
Test of synchronization types like locks, conditions and queues.
ex_workers.py
A test showing how to use queues to feed tasks to a collection of worker processes and collect the results.
ex_webserver.py
An example of how a pool of worker processes can each run a SimpleHTTPServer.HttpServer instance while sharing a single listening socket.
benchmarks.py
Some simple benchmarks comparing processing with threading.
uqfoundation-multiprocess-b3457a5/py3.12/doc/tests.txt000066400000000000000000000027331455552142400227310ustar00rootroot00000000000000.. include:: header.txt Tests and Examples ================== `processing` contains a `test` sub-package which contains unit tests for the package. You can do a test run by doing :: python -m processing.tests on Python 2.5 or :: python -c "from processing.tests import main; main()" on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager. The `example` sub-package contains the following modules: `ex_newtype.py <../examples/ex_newtype.py>`_ Demonstration of how to create and use customized managers and proxies. `ex_pool.py <../examples/ex_pool.py>`_ Test of the `Pool` class which represents a process pool. `ex_synchronize.py <../examples/ex_synchronize.py>`_ Test of synchronization types like locks, conditions and queues. `ex_workers.py <../examples/ex_workers.py>`_ A test showing how to use queues to feed tasks to a collection of worker process and collect the results. `ex_webserver.py <../examples/ex_webserver.py>`_ An example of how a pool of worker processes can each run a `SimpleHTTPServer.HttpServer` instance while sharing a single listening socket. `benchmarks.py <../examples/benchmarks.py>`_ Some simple benchmarks comparing `processing` with `threading`. .. _Prev: programming-guidelines.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.12/doc/version.txt000066400000000000000000000000341455552142400232440ustar00rootroot00000000000000.. |version| replace:: 0.52 uqfoundation-multiprocess-b3457a5/py3.12/examples/000077500000000000000000000000001455552142400220725ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/examples/__init__.py000066400000000000000000000000001455552142400241710ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/examples/benchmarks.py000066400000000000000000000131321455552142400245610ustar00rootroot00000000000000# # Simple benchmarks for the processing package # import time, sys, multiprocess as processing, threading, queue as Queue, gc processing.freezeSupport = processing.freeze_support if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in range(iterations): q.put(a) # q.putMany((a for i in range(iterations)) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print(iterations, 'objects passed through the queue in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 cond.acquire() cond.notify() cond.release() for i in range(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = processing.Pipe() cond = processing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = processing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print(iterations, 'objects passed through connection in',elapsed,'seconds') print('average number/sec:', 
iterations/elapsed) #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): a = seq[5] elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): l.acquire() l.release() elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in range(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in range(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print(iterations * 2, 'waits in', elapsed, 'seconds') print('average number/sec:', iterations * 2 / elapsed) #### def test(): manager = processing.Manager() gc.disable() print('\n\t######## testing Queue.Queue\n') test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print('\n\t######## testing processing.Queue\n') test_queuespeed(processing.Process, processing.Queue(), processing.Condition()) print('\n\t######## testing Queue managed by server process\n') test_queuespeed(processing.Process, manager.Queue(), manager.Condition()) print('\n\t######## testing processing.Pipe\n') test_pipespeed() print print('\n\t######## testing list\n') test_seqspeed(range(10)) print('\n\t######## testing list managed by server process\n') test_seqspeed(manager.list(range(10))) print('\n\t######## testing Array("i", ..., lock=False)\n') test_seqspeed(processing.Array('i', range(10), lock=False)) print('\n\t######## testing Array("i", ..., lock=True)\n') test_seqspeed(processing.Array('i', range(10), lock=True)) print() print('\n\t######## testing threading.Lock\n') test_lockspeed(threading.Lock()) print('\n\t######## testing threading.RLock\n') test_lockspeed(threading.RLock()) print('\n\t######## testing processing.Lock\n') test_lockspeed(processing.Lock()) print('\n\t######## testing processing.RLock\n') test_lockspeed(processing.RLock()) print('\n\t######## testing lock managed by server process\n') test_lockspeed(manager.Lock()) print('\n\t######## testing rlock managed by server process\n') test_lockspeed(manager.RLock()) print() print('\n\t######## testing threading.Condition\n') test_conditionspeed(threading.Thread, threading.Condition()) print('\n\t######## testing processing.Condition\n') test_conditionspeed(processing.Process, processing.Condition()) print('\n\t######## testing condition managed by a server process\n') test_conditionspeed(processing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': processing.freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.12/examples/ex_newtype.py000066400000000000000000000030731455552142400246360ustar00rootroot00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. 
# from multiprocess import freeze_support as freezeSupport from multiprocess.managers import BaseManager, IteratorProxy as BaseProxy ## class Foo(object): def f(self): print('you called Foo.f()') def g(self): print('you called Foo.g()') def _h(self): print('you called Foo._h()') # A simple generator function def baz(): for i in range(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): def __iter__(self): return self def __next__(self): return self._callmethod('__next__') ## class MyManager(BaseManager): pass # register the Foo class; make all public methods accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make only `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) ## def test(): manager = MyManager() manager.start() print('-' * 20) f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') print('-' * 20) f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') print('-' * 20) it = manager.baz() for i in it: print('<%d>' % i, end=' ') print() ## if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.12/examples/ex_pool.py000066400000000000000000000155061455552142400241200ustar00rootroot00000000000000# # A test of `processing.Pool` class # from multiprocess import Pool, TimeoutError from multiprocess import cpu_count as cpuCount, current_process as currentProcess, freeze_support as freezeSupport, active_children as activeChildren import time, random, sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print('cpuCount() = %d\n' % cpuCount()) # # Create pool # PROCESSES = 4 print('Creating pool with %d processes\n' % PROCESSES) pool = Pool(PROCESSES) # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print('Ordered results using pool.apply_async():') for r in results: print('\t', r.get()) print() print('Ordered results using pool.imap():') for x in imap_it: print('\t', x) print() print('Unordered results using pool.imap_unordered():') for x in imap_unordered_it: print('\t', x) print() print('Ordered results using pool.map() --- will block till complete:') for x in pool.map(calculatestar, TASKS): print('\t', x) print() # # Simple benchmarks # N = 100000 print('def pow3(x): return x**3') t = time.time() A = list(map(pow3, range(N))) print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() B = pool.map(pow3, range(N)) print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() C = list(pool.imap(pow3, range(N), chunksize=N//8)) print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() L = [None] * 1000000 print('def noop(x): pass') print('L = [None] * 1000000') t 
= time.time() A = list(map(noop, L)) print('\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() B = pool.map(noop, L) print('\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() del A, B, C, L # # Test error handling # print('Testing error handling:') try: print(pool.apply(f, (5,))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.apply()') else: raise AssertionError('expected ZeroDivisionError') try: print(pool.map(f, range(10))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.map()') else: raise AssertionError('expected ZeroDivisionError') try: print(list(pool.imap(f, range(10)))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from list(pool.imap())') else: raise AssertionError('expected ZeroDivisionError') it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError('expected ZeroDivisionError') assert i == 9 print('\tGot ZeroDivisionError as expected from IMapIterator.next()') print() # # Testing timeouts # print('Testing ApplyResult.get() with timeout:', end='') res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except TimeoutError: sys.stdout.write('.') print() print() print('Testing IMapIterator.next() with timeout:', end='') it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except TimeoutError: sys.stdout.write('.') print() print() # # Testing callback # print('Testing callback:') A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print('\tcallbacks succeeded\n') else: print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print('Testing close():') for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print('\tclose() succeeded\n') # # Check terminate() method # print('Testing terminate():') pool = Pool(2) ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print('\tterminate() succeeded\n') # # Check garbage collection # print('Testing garbage collection:') pool = Pool(2) processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] del results, pool time.sleep(0.2) for worker in processes: assert not worker.is_alive() print('\tgarbage collection succeeded\n') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.12/examples/ex_synchronize.py000066400000000000000000000144041455552142400255160ustar00rootroot00000000000000# # A test file for the `processing` package # import time, sys, random from queue import Empty import multiprocess as processing # may 
get overwritten processing.currentProcess = processing.current_process processing.freezeSupport = processing.freeze_support processing.activeChildren = processing.active_children #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print('\n\t\t\t' + str(processing.currentProcess()) + ' has finished') running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = processing.Value('i', TASKS) mutex = processing.Lock() for i in range(TASKS): processing.Process(target=value_func, args=(running, mutex)).start() while running.value > 0: time.sleep(0.08) mutex.acquire() print(running.value, end=' ') sys.stdout.flush() mutex.release() print() print('No more running processes') #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = processing.Queue() p = processing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print(o, end=' ') sys.stdout.flush() except Empty: print('TIMEOUT') print() #### TEST_CONDITION def condition_func(cond): cond.acquire() print('\t' + str(cond)) time.sleep(2) print('\tchild is notifying') print('\t' + str(cond)) cond.notify() cond.release() def test_condition(): cond = processing.Condition() p = processing.Process(target=condition_func, args=(cond,)) print(cond) cond.acquire() print(cond) cond.acquire() print(cond) p.start() print('main is waiting') cond.wait() print('main has woken up') print(cond) cond.release() print(cond) cond.release() p.join() print(cond) #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print(running.value, 'tasks are running') mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print('%s has finished' % processing.currentProcess()) mutex.release() sema.release() def test_semaphore(): sema = processing.Semaphore(3) mutex = processing.RLock() running = processing.Value('i', 0) processes = [ processing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print('\tchild sleeping') time.sleep(5.5) print('\n\tchild terminating') def test_join_timeout(): p = processing.Process(target=join_timeout_func) p.start() print('waiting for process to finish') while 1: p.join(timeout=1) if not p.is_alive(): break print('.', end=' ') sys.stdout.flush() #### TEST_EVENT def event_func(event): print('\t%r is waiting' % processing.currentProcess()) event.wait() print('\t%r has woken up' % processing.currentProcess()) def test_event(): event = processing.Event() processes = [processing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print('main is sleeping') time.sleep(2) print('main is setting event') event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert list(a) == sa print('Tests passed') def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [processing.Value(id, v) for id, v in values] shared_arrays = 
[processing.Array(id, a) for id, a in arrays] p = processing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print('\n\t######## %s\n' % func.__name__) func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print(info) raise ValueError('there should be no positive refcounts left') if __name__ == '__main__': processing.freezeSupport() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print(' Using processes '.center(79, '-')) namespace = processing elif sys.argv[1] == 'manager': print(' Using processes and a manager '.center(79, '-')) namespace = processing.Manager() namespace.Process = processing.Process namespace.currentProcess = processing.currentProcess namespace.activeChildren = processing.activeChildren elif sys.argv[1] == 'threads': print(' Using threads '.center(79, '-')) import processing.dummy as namespace else: print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]) raise SystemExit(2) test(namespace) uqfoundation-multiprocess-b3457a5/py3.12/examples/ex_webserver.py000066400000000000000000000041001455552142400251370ustar00rootroot00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `processing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. 
# import os import sys from multiprocess import Process, current_process as currentProcess, freeze_support as freezeSupport from http.server import HTTPServer from http.server import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocess.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (currentProcess()._name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print('Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)) print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']) os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.12/examples/ex_workers.py000066400000000000000000000042241455552142400246360ustar00rootroot00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). 
# import time import random from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport from multiprocess import Queue # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks list(map(task_queue.put, TASKS1)) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print('Unordered results:') for i in range(len(TASKS1)): print('\t', done_queue.get()) # Add more tasks using `put()` instead of `putMany()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print('\t', done_queue.get()) # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.12/index.html000066400000000000000000000117511455552142400222560ustar00rootroot00000000000000 Python processing

Python processing

Author: R Oudkerk
Contact: roudkerk at users.berlios.de
Url: http://developer.berlios.de/projects/pyprocessing
Version: 0.52
Licence: BSD Licence

processing is a package for the Python language which supports the spawning of processes using the API of the standard library's threading module. It runs on both Unix and Windows.

Features:

  • Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
  • Objects can be shared between processes using a server process or (for simple data) shared memory.
  • Equivalents of all the synchronization primitives in threading are available.
  • A Pool class makes it easy to submit tasks to a pool of worker processes.

Examples

The processing.Process class follows the API of threading.Thread. For example

from processing import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=[q])
    p.start()
    print q.get()
    p.join()
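
For comparison, a minimal sketch of the same program written against the multiprocess fork distributed in this repository (Python 3 syntax); only the import and the print call change:

from multiprocess import Process, Queue

def f(q):
    # runs in the child process; the queue is shared with the parent
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print(q.get())          # prints 'hello world'
    p.join()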

Synchronization primitives like locks, semaphores and conditions are available, for example

>>> from processing import Condition
>>> c = Condition()
>>> print c
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print c
<Condition(<RLock(MainProcess, 1)>), 0>
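
The same primitives are useful for protecting shared state. A minimal sketch, using the multiprocess fork in this repository, of a Lock guarding a shared Value while several processes increment it:

from multiprocess import Process, Lock, Value

def add(lock, counter, n=1000):
    # every increment is taken under the lock, so no updates are lost
    for _ in range(n):
        with lock:
            counter.value += 1

if __name__ == '__main__':
    lock = Lock()
    counter = Value('i', 0)        # shared 32-bit integer
    workers = [Process(target=add, args=(lock, counter)) for _ in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    print(counter.value)           # 4000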

One can also use a manager to create shared objects either in shared memory or in a server process, for example

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print l
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print repr(l)
<Proxy[list] object at 0x00E1B3B0>
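
A minimal sketch of the same idea with the multiprocess fork in this repository, where a manager-backed dictionary collects results written by several worker processes:

from multiprocess import Process, Manager

def work(d, key):
    # each worker records its result in the shared dictionary
    d[key] = key * key

if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()         # proxy to a dict living in the manager process
        jobs = [Process(target=work, args=(d, i)) for i in range(5)]
        for p in jobs:
            p.start()
        for p in jobs:
            p.join()
        print(dict(d))             # {0: 0, 1: 1, 2: 4, 3: 9, 4: 16}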

Tasks can be offloaded to a pool of worker processes in various ways, for example

>>> from processing import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.mapAsync(f, range(10))
>>> print result.get(timeout=1)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
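
A minimal sketch of equivalent pool usage with the multiprocess fork in this repository, where the asynchronous calls are spelled apply_async and map_async rather than the camel-case names above:

from multiprocess import Pool

def f(x):
    return x * x

if __name__ == '__main__':
    with Pool(4) as pool:
        print(pool.map(f, range(10)))       # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
        result = pool.apply_async(f, (20,))
        print(result.get(timeout=1))        # 400

Because multiprocess serializes with dill rather than the standard pickle, interactively defined functions (and usually lambdas) can also be submitted to the pool, which is the enhanced serialization this fork was created to provide.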
uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/000077500000000000000000000000001455552142400230055ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/__init__.py000066400000000000000000000035001455552142400251140ustar00rootroot00000000000000# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Original: Copyright (c) 2006-2008, R Oudkerk # Original: Licensed to PSF under a Contributor Agreement. # Forked by Mike McKerns, to support enhanced serialization. # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) sys.path.append(root) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(root, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) del os, sys, root, get_license_text, get_readme_as_rst import sys from . import context # # Copy stuff from default context # __all__ = [x for x in dir(context._default_context) if not x.startswith('_')] globals().update((name, getattr(context._default_context, name)) for name in __all__) # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def license(): """print license""" print (__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/connection.py000066400000000000000000001211511455552142400255170ustar00rootroot00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] import errno import io import os import sys import socket import struct import time import tempfile import itertools try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import util from . import AuthenticationError, BufferTooShort from .context import reduction _ForkingPickler = reduction.ForkingPickler try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE except ImportError: if sys.platform == 'win32': raise _winapi = None # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... CONNECTION_TIMEOUT = 20. 
_mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return getattr(time,'monotonic',time.time)() + timeout def _check_timeout(t): return getattr(time,'monotonic',time.time)() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter)), dir="") else: raise ValueError('unrecognized family') def _validate_family(family): ''' Checks if the family is valid for the current environment. ''' if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) if sys.platform == 'win32' and family == 'AF_UNIX': # double check if not hasattr(socket, family): raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str or util.is_abstract_socket_namespace(address): return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _ConnectionBase: _handle = None def __init__(self, handle, readable=True, writable=True): handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? 
def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) if m.itemsize > 1: m = m.cast('B') n = m.nbytes if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(_ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. """ self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[offset // itemsize : (offset + size) // itemsize]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return _ForkingPickler.loads(buf.getbuffer()) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. 
""" _got_empty_message = False _send_ov = None def _close(self, _CloseHandle=_winapi.CloseHandle): ov = self._send_ov if ov is not None: # Interrupt WaitForMultipleObjects() in _send_bytes() ov.cancel() _CloseHandle(self._handle) def _send_bytes(self, buf): if self._send_ov is not None: # A connection should only be used by a single thread raise ValueError("concurrent send_bytes() calls " "are not supported") ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) self._send_ov = ov try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: self._send_ov = None nwritten, err = ov.GetOverlappedResult(True) if err == _winapi.ERROR_OPERATION_ABORTED: # close() was called by another thread while # WaitForMultipleObjects() was waiting for the overlapped # operation. raise OSError(errno.EPIPE, "handle is closed") assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile(self._handle, bsize, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). 
""" if _winapi: def _close(self, _close=_multiprocessing.closesocket): _close(self._handle) _write = _multiprocessing.send _read = _multiprocessing.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: n = write(self._handle, buf) remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: chunk = read(handle, remaining) n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf): n = len(buf) if n > 0x7fffffff: pre_header = struct.pack("!i", -1) header = struct.pack("!Q", n) self._send(pre_header) self._send(header) self._send(buf) else: # For wire compatibility with 3.7 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if size == -1: buf = self._recv(8) size, = struct.unpack("!Q", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. 
''' listener = self._listener if listener is not None: self._listener = None listener.close() @property def address(self): return self._listener._address @property def last_accepted(self): return self._listener._last_accepted def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = Connection(s1.detach()) c2 = Connection(s2.detach()) else: fd1, fd2 = os.pipe() c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). 
if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): # Linux abstract socket namespaces do not need to be explicitly unlinked self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() s.setblocking(True) return Connection(s.detach()) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) with socket.socket( getattr(socket, family) ) as s: s.setblocking(True) s.connect(address) return Connection(s.detach()) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: res = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 40 # MUST be > 20 MESSAGE_MAXLEN = 256 # default is None _CHALLENGE = b'#CHALLENGE#' _WELCOME = b'#WELCOME#' _FAILURE = b'#FAILURE#' # multiprocessing.connection Authentication Handshake Protocol Description # (as documented for reference after reading the existing code) # ============================================================================= # # On Windows: native pipes with "overlapped IO" are used to send the bytes, # instead of the length prefix SIZE scheme described below. (ie: the OS deals # with message sizes for us) # # Protocol error behaviors: # # On POSIX, any failure to receive the length prefix into SIZE, for SIZE greater # than the requested maxsize to receive, or receiving fewer than SIZE bytes # results in the connection being closed and auth to fail. # # On Windows, receiving too few bytes is never a low level _recv_bytes read # error, receiving too many will trigger an error only if receive maxsize # value was larger than 128 OR the if the data arrived in smaller pieces. # # Serving side Client side # ------------------------------ --------------------------------------- # 0. Open a connection on the pipe. # 1. Accept connection. # 2. Random 20+ bytes -> MESSAGE # Modern servers always send # more than 20 bytes and include # a {digest} prefix on it with # their preferred HMAC digest. # Legacy ones send ==20 bytes. # 3. send 4 byte length (net order) # prefix followed by: # b'#CHALLENGE#' + MESSAGE # 4. Receive 4 bytes, parse as network byte # order integer. If it is -1, receive an # additional 8 bytes, parse that as network # byte order. The result is the length of # the data that follows -> SIZE. # 5. Receive min(SIZE, 256) bytes -> M1 # 6. Assert that M1 starts with: # b'#CHALLENGE#' # 7. Strip that prefix from M1 into -> M2 # 7.1. Parse M2: if it is exactly 20 bytes in # length this indicates a legacy server # supporting only HMAC-MD5. Otherwise the # 7.2. preferred digest is looked up from an # expected "{digest}" prefix on M2. No prefix # or unsupported digest? <- AuthenticationError # 7.3. Put divined algorithm name in -> D_NAME # 8. Compute HMAC-D_NAME of AUTHKEY, M2 -> C_DIGEST # 9. Send 4 byte length prefix (net order) # followed by C_DIGEST bytes. # 10. Receive 4 or 4+8 byte length # prefix (#4 dance) -> SIZE. # 11. Receive min(SIZE, 256) -> C_D. # 11.1. Parse C_D: legacy servers # accept it as is, "md5" -> D_NAME # 11.2. modern servers check the length # of C_D, IF it is 16 bytes? # 11.2.1. "md5" -> D_NAME # and skip to step 12. # 11.3. longer? 
expect and parse a "{digest}" # prefix into -> D_NAME. # Strip the prefix and store remaining # bytes in -> C_D. # 11.4. Don't like D_NAME? <- AuthenticationError # 12. Compute HMAC-D_NAME of AUTHKEY, # MESSAGE into -> M_DIGEST. # 13. Compare M_DIGEST == C_D: # 14a: Match? Send length prefix & # b'#WELCOME#' # <- RETURN # 14b: Mismatch? Send len prefix & # b'#FAILURE#' # <- CLOSE & AuthenticationError # 15. Receive 4 or 4+8 byte length prefix (net # order) again as in #4 into -> SIZE. # 16. Receive min(SIZE, 256) bytes -> M3. # 17. Compare M3 == b'#WELCOME#': # 17a. Match? <- RETURN # 17b. Mismatch? <- CLOSE & AuthenticationError # # If this RETURNed, the connection remains open: it has been authenticated. # # Length prefixes are used consistently. Even on the legacy protocol, this # was good fortune and allowed us to evolve the protocol by using the length # of the opening challenge or length of the returned digest as a signal as # to which protocol the other end supports. _ALLOWED_DIGESTS = frozenset( {b'md5', b'sha256', b'sha384', b'sha3_256', b'sha3_384'}) _MAX_DIGEST_LEN = max(len(_) for _ in _ALLOWED_DIGESTS) # Old hmac-md5 only server versions from Python <=3.11 sent a message of this # length. It happens to not match the length of any supported digest so we can # use a message of this length to indicate that we should work in backwards # compatible md5-only mode without a {digest_name} prefix on our response. _MD5ONLY_MESSAGE_LENGTH = 20 _MD5_DIGEST_LEN = 16 _LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN) def _get_digest_name_and_payload(message: bytes) -> (str, bytes): """Returns a digest name and the payload for a response hash. If a legacy protocol is detected based on the message length or contents the digest name returned will be empty to indicate legacy mode where MD5 and no digest prefix should be sent. """ # modern message format: b"{digest}payload" longer than 20 bytes # legacy message format: 16 or 20 byte b"payload" if len(message) in _LEGACY_LENGTHS: # Either this was a legacy server challenge, or we're processing # a reply from a legacy client that sent an unprefixed 16-byte # HMAC-MD5 response. All messages using the modern protocol will # be longer than either of these lengths. return '', message if (message.startswith(b'{') and (curly := message.find(b'}', 1, _MAX_DIGEST_LEN+2)) > 0): digest = message[1:curly] if digest in _ALLOWED_DIGESTS: payload = message[curly+1:] return digest.decode('ascii'), payload raise AuthenticationError( 'unsupported message length, missing digest prefix, ' f'or unsupported digest: {message=}') def _create_response(authkey, message): """Create a MAC based on authkey and message The MAC algorithm defaults to HMAC-MD5, unless MD5 is not available or the message has a '{digest_name}' prefix. For legacy HMAC-MD5, the response is the raw MAC, otherwise the response is prefixed with '{digest_name}', e.g. b'{sha256}abcdefg...' Note: The MAC protects the entire message including the digest_name prefix. """ import hmac digest_name = _get_digest_name_and_payload(message)[0] # The MAC protects the entire message: digest header and payload. if not digest_name: # Legacy server without a {digest} prefix on message. # Generate a legacy non-prefixed HMAC-MD5 reply. try: return hmac.new(authkey, message, 'md5').digest() except ValueError: # HMAC-MD5 is not available (FIPS mode?), fall back to # HMAC-SHA2-256 modern protocol. The legacy server probably # doesn't support it and will reject us anyways. 
:shrug: digest_name = 'sha256' # Modern protocol, indicate the digest used in the reply. response = hmac.new(authkey, message, digest_name).digest() return b'{%s}%s' % (digest_name.encode('ascii'), response) def _verify_challenge(authkey, message, response): """Verify MAC challenge If our message did not include a digest_name prefix, the client is allowed to select a stronger digest_name from _ALLOWED_DIGESTS. In case our message is prefixed, a client cannot downgrade to a weaker algorithm, because the MAC is calculated over the entire message including the '{digest_name}' prefix. """ import hmac response_digest, response_mac = _get_digest_name_and_payload(response) response_digest = response_digest or 'md5' try: expected = hmac.new(authkey, message, response_digest).digest() except ValueError: raise AuthenticationError(f'{response_digest=} unsupported') if len(expected) != len(response_mac): raise AuthenticationError( f'expected {response_digest!r} of length {len(expected)} ' f'got {len(response_mac)}') if not hmac.compare_digest(expected, response_mac): raise AuthenticationError('digest received was wrong') def deliver_challenge(connection, authkey: bytes, digest_name='sha256'): if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) assert MESSAGE_LENGTH > _MD5ONLY_MESSAGE_LENGTH, "protocol constraint" message = os.urandom(MESSAGE_LENGTH) message = b'{%s}%s' % (digest_name.encode('ascii'), message) # Even when sending a challenge to a legacy client that does not support # digest prefixes, they'll take the entire thing as a challenge and # respond to it with a raw HMAC-MD5. connection.send_bytes(_CHALLENGE + message) response = connection.recv_bytes(MESSAGE_MAXLEN) # reject large message try: _verify_challenge(authkey, message, response) except AuthenticationError: connection.send_bytes(_FAILURE) raise else: connection.send_bytes(_WELCOME) def answer_challenge(connection, authkey: bytes): if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = connection.recv_bytes(MESSAGE_MAXLEN) # reject large message if not message.startswith(_CHALLENGE): raise AuthenticationError( f'Protocol error, expected challenge: {message=}') message = message[len(_CHALLENGE):] if len(message) < _MD5ONLY_MESSAGE_LENGTH: raise AuthenticationError('challenge too short: {len(message)} bytes') digest = _create_response(authkey, message) connection.send_bytes(digest) response = connection.recv_bytes(MESSAGE_MAXLEN) # reject large message if response != _WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib return 
ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signalled. (Only # returning the first signalled might create starvation issues.) L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res+1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: ov, err = None, e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed... if ov and sys.getwindowsversion()[:2] >= (6, 2): # ... except on Windows 8 and later, where # the message HAS been consumed. try: _, err = ov.GetOverlappedResult(False) except OSError as e: err = e.winerror if not err and hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [o for o in object_list if o in ready_objects] else: import selectors # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). if hasattr(selectors, 'PollSelector'): _WaitSelector = selectors.PollSelector else: _WaitSelector = selectors.SelectSelector def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. 
''' with _WaitSelector() as selector: for obj in object_list: selector.register(obj, selectors.EVENT_READ) if timeout is not None: deadline = getattr(time,'monotonic',time.time)() + timeout while True: ready = selector.select(timeout) if ready: return [key.fileobj for (key, events) in ready] else: if timeout is not None: timeout = deadline - getattr(time,'monotonic',time.time)() if timeout < 0: return ready # # Make connection and socket objects shareable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(sock.detach(), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): handle = dh.detach() return PipeConnection(handle, readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): fd = df.detach() return Connection(fd, readable, writable) reduction.register(Connection, reduce_connection) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/context.py000066400000000000000000000266461455552142400250610ustar00rootroot00000000000000import os import sys import threading from . import process from . import reduction __all__ = () # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError current_process = staticmethod(process.current_process) parent_process = staticmethod(process.parent_process) active_children = staticmethod(process.active_children) def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, lock=True): '''Returns a synchronized shared object''' from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *, lock=True): '''Returns a synchronized shared array''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . 
import connection def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. ''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) from None ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property def reducer(self): '''Controls how objects will be reduced to a form that can be shared with other processes.''' return globals().get('reduction') @reducer.setter def reducer(self, reduction): globals()['reduction'] = reduction def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) @staticmethod def _after_fork(): return _default_context.get_context().Process._after_fork() class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super().get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): """Returns a list of the supported start methods, default first.""" if sys.platform == 'win32': return ['spawn'] else: methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] if reduction.HAVE_SEND_HANDLE: methods.append('forkserver') return methods # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): if not 
reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } if sys.platform == 'darwin': # bpo-33725: running arbitrary code after fork() is no longer reliable # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn else: _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/dummy/000077500000000000000000000000001455552142400241405ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/dummy/__init__.py000066400000000000000000000057651455552142400262660ustar00rootroot00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
# __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): if self._parent is not current_process(): raise RuntimeError( "Parent is {0!r} but current_process is {1!r}".format( self._parent, current_process())) self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/dummy/connection.py000066400000000000000000000030761455552142400266570ustar00rootroot00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
# __all__ = [ 'Client', 'Listener', 'Pipe' ] from queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None @property def address(self): return self._backlog_queue def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False with self._in.not_empty: self._in.not_empty.wait(timeout) return self._in.qsize() > 0 def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/forkserver.py000066400000000000000000000275421455552142400255610ustar00rootroot00000000000000import errno import os import selectors import signal import socket import struct import sys import threading import warnings from . import connection from . import process from .context import reduction from . import resource_tracker from . import spawn from . import util __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def _stop(self): # Method used by unit tests to stop the server with self._lock: self._stop_unlocked() def _stop_unlocked(self): if self._forkserver_pid is None: return # close the "alive" file descriptor asks the server to stop os.close(self._forkserver_alive_fd) self._forkserver_alive_fd = None os.waitpid(self._forkserver_pid, 0) self._forkserver_pid = None if not util.is_abstract_socket_namespace(self._forkserver_address): os.unlink(self._forkserver_address) self._forkserver_address = None def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in modules_names): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. 
''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, resource_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. ''' with self._lock: resource_tracker.ensure_running() if self._forkserver_pid is not None: # forkserver was launched before, is it still running? pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) if not pid: # still alive return # dead, launch it again os.close(self._forkserver_alive_fd) self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None cmd = ('from multiprocess.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = {x: y for x, y in data.items() if x in desired_keys} else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) if not util.is_abstract_socket_namespace(address): os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] pid = util.spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w self._forkserver_pid = pid # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass util._close_stdin() sig_r, sig_w = os.pipe() os.set_blocking(sig_r, False) os.set_blocking(sig_w, False) def sigchld_handler(*_unused): # Dummy signal handler, doesn't do anything pass handlers = { # unblocking SIGCHLD allows the wakeup fd to notify our event loop signal.SIGCHLD: sigchld_handler, # protect the process from ^C signal.SIGINT: signal.SIG_IGN, } old_handlers = {sig: signal.signal(sig, val) for (sig, val) in handlers.items()} # calling os.write() in the Python signal handler is racy signal.set_wakeup_fd(sig_w) # map child pids to client fds pid_to_fd = {} with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) selector.register(sig_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: 
# EOF because no more client processes left assert os.read(alive_r, 1) == b'', "Not at EOF?" raise SystemExit if sig_r in rfds: # Got SIGCHLD os.read(sig_r, 65536) # exhaust while True: # Scan for child processes try: pid, sts = os.waitpid(-1, os.WNOHANG) except ChildProcessError: break if pid == 0: break child_w = pid_to_fd.pop(pid, None) if child_w is not None: returncode = os.waitstatus_to_exitcode(sts) # Send exit code to client process try: write_signed(child_w, returncode) except BrokenPipeError: # client vanished pass os.close(child_w) else: # This shouldn't happen really warnings.warn('forkserver: waitpid returned ' 'unexpected pid %d' % pid) if listener in rfds: # Incoming fork request with listener.accept()[0] as s: # Receive fds from client fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) if len(fds) > MAXFDS_TO_SEND: raise RuntimeError( "Too many ({0:n}) fds to send".format( len(fds))) child_r, child_w, *fds = fds s.close() pid = os.fork() if pid == 0: # Child code = 1 try: listener.close() selector.close() unused_fds = [alive_r, child_w, sig_r, sig_w] unused_fds.extend(pid_to_fd.values()) code = _serve_one(child_r, fds, unused_fds, old_handlers) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) else: # Send pid to client process try: write_signed(child_w, pid) except BrokenPipeError: # client vanished pass pid_to_fd[pid] = child_w os.close(child_r) for fd in fds: os.close(fd) except OSError as e: if e.errno != errno.ECONNABORTED: raise def _serve_one(child_r, fds, unused_fds, handlers): # close unnecessary stuff and reset signal handlers signal.set_wakeup_fd(-1) for sig, val in handlers.items(): signal.signal(sig, val) for fd in unused_fds: os.close(fd) (_forkserver._forkserver_alive_fd, resource_tracker._resource_tracker._fd, *_forkserver._inherited_fds) = fds # Run process object received over pipe parent_sentinel = os.dup(child_r) code = spawn._main(child_r, parent_sentinel) return code # # Read and write signed numbers # def read_signed(fd): data = b'' length = SIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return SIGNED_STRUCT.unpack(data)[0] def write_signed(fd, n): msg = SIGNED_STRUCT.pack(n) while msg: nbytes = os.write(fd, msg) if nbytes == 0: raise RuntimeError('should not get here') msg = msg[nbytes:] # # # _forkserver = ForkServer() ensure_running = _forkserver.ensure_running get_inherited_fds = _forkserver.get_inherited_fds connect_to_new_process = _forkserver.connect_to_new_process set_forkserver_preload = _forkserver.set_forkserver_preload uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/heap.py000066400000000000000000000265521455552142400243060ustar00rootroot00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import bisect from collections import defaultdict import mmap import os import sys import tempfile import threading from .context import reduction, assert_spawning from . import util __all__ = ['BufferWrapper'] # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': import _winapi class Arena(object): """ A shared memory area backed by anonymous memory (Windows). 
""" _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if _winapi.GetLastError() == 0: break # We have reopened a preexisting mmap. buf.close() else: raise FileExistsError('Cannot find name for new mmap') self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state # Reopen existing mmap self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. See issue 23060 #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS else: class Arena(object): """ A shared memory area backed by a temporary file (POSIX). """ if sys.platform == 'linux': _dir_candidates = ['/dev/shm'] else: _dir_candidates = [] def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: # Arena is created anew (if fd != -1, it means we're coming # from rebuild_arena() below) self.fd, name = tempfile.mkstemp( prefix='pym-%d-'%os.getpid(), dir=self._choose_dir(size)) os.unlink(name) util.Finalize(self, os.close, (self.fd,)) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def _choose_dir(self, size): # Choose a non-storage backed directory if possible, # to improve performance for d in self._dir_candidates: st = os.statvfs(d) if st.f_bavail * st.f_frsize >= size: # enough free space? return d return util.get_temp_dir() def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because ' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): # Minimum malloc() alignment _alignment = 8 _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() # Current arena allocation size self._size = size # A sorted list of available block sizes in arenas self._lengths = [] # Free block management: # - map each block size to a list of `(Arena, start, stop)` blocks self._len_to_seq = {} # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block # starting at that offset self._start_to_block = {} # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block # ending at that offset self._stop_to_block = {} # Map arenas to their `(Arena, start, stop)` blocks in use self._allocated_blocks = defaultdict(set) self._arenas = [] # List of pending blocks to free - see comment in free() below self._pending_free_blocks = [] # Statistics self._n_mallocs = 0 self._n_frees = 0 @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _new_arena(self, size): # Create a new arena with at least the given *size* length = self._roundup(max(self._size, size), mmap.PAGESIZE) # We carve larger and larger arenas, for efficiency, until we # reach a large-ish size (roughly L3 cache-sized) if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) def 
_discard_arena(self, arena): # Possibly delete the given (unused) arena length = arena.size # Reusing an existing arena is faster than creating a new one, so # we only reclaim space if it's large enough. if length < self._DISCARD_FREE_SPACE_LARGER_THAN: return blocks = self._allocated_blocks.pop(arena) assert not blocks del self._start_to_block[(arena, 0)] del self._stop_to_block[(arena, length)] self._arenas.remove(arena) seq = self._len_to_seq[length] seq.remove((arena, 0, length)) if not seq: del self._len_to_seq[length] self._lengths.remove(length) def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): return self._new_arena(size) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _add_free_block(self, block): # make block available and try to merge with its neighbours in the arena (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _remove_allocated_block(self, block): arena, start, stop = block blocks = self._allocated_blocks[arena] blocks.remove((start, stop)) if not blocks: # Arena is entirely free, discard it from this process self._discard_arena(arena) def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held. while True: try: block = self._pending_free_blocks.pop() except IndexError: break self._add_free_block(block) self._remove_allocated_block(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
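        # A minimal standalone sketch of the "trylock plus deferred work list"
        # idiom described in the comment above (illustrative only, not part of
        # this class; the names lock/pending/_reclaim are invented for the
        # example):
        #
        #     import threading
        #     lock = threading.Lock()
        #     pending = []          # work postponed because the lock was busy
        #
        #     def release(item):
        #         # May be called re-entrantly (e.g. from __del__ during GC),
        #         # so never block on the lock here.
        #         if not lock.acquire(False):
        #             pending.append(item)      # handled by a later caller
        #             return
        #         try:
        #             while pending:            # drain earlier deferred work
        #                 _reclaim(pending.pop())
        #             _reclaim(item)
        #         finally:
        #             lock.release()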
if os.getpid() != self._lastpid: raise ValueError( "My pid ({0:n}) is not last pid {1:n}".format( os.getpid(),self._lastpid)) if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._n_frees += 1 self._free_pending_blocks() self._add_free_block(block) self._remove_allocated_block(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._n_mallocs += 1 # allow pending blocks to be marked available self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) real_stop = start + size if real_stop < stop: # if the returned block is larger than necessary, mark # the remainder available self._add_free_block((arena, real_stop, stop)) self._allocated_blocks[arena].add((start, real_stop)) return (arena, start, real_stop) # # Class wrapping a block allocated out of a Heap -- can be inherited by child process # class BufferWrapper(object): _heap = Heap() def __init__(self, size): if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start+size] uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/managers.py000066400000000000000000001350731455552142400251650ustar00rootroot00000000000000# # Module providing manager classes for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] # # Imports # import sys import threading import signal import array import queue import time import types import os from os import getpid from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen, ProcessError from . import pool from . import process from . import util from . import get_context try: from . 
import shared_memory except ImportError: HAS_SHMEM = False else: HAS_SHMEM = True __all__.append('SharedMemoryManager') # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) del view_type, view_types # # Type for identifying shared objects # class Token(object): ''' Type to uniquely identify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): if not isinstance(result, str): raise TypeError( "Result {0!r} (kind '{1}') type is {2}, not str".format( result, kind, type(result))) if kind == '#UNSERIALIZABLE': return RemoteError('Unserializable message: %s\n' % result) else: return RemoteError(result) else: return ValueError('Unrecognized message type {!r}'.format(kind)) class RemoteError(Exception): def __str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): if not isinstance(authkey, bytes): raise TypeError( "Authkey {0!r} is type {1!s}, not bytes".format( authkey, type(authkey))) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if sys.stdout != sys.__stdout__: # what 
about stderr? util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def _handle_request(self, c): request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) def handle_request(self, conn): ''' Handle a new connection ''' try: self._handle_request(conn) except SystemExit: # Server.serve_client() calls sys.exit(0) on EOF pass finally: conn.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = \ self.id_to_local_proxy_obj[ident] except KeyError: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' # Perhaps include debug info about 'c'? 
with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' return len(self.id_to_refcount) def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(self, c, typeid, /, *args, **kwds): ''' Create a new shared object and return its id ''' with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: if kwds or (len(args) != 1): raise ValueError( "Without callable, must have one non-keyword argument") obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: if not isinstance(method_to_typeid, dict): raise TypeError( "Method_to_typeid {0!r}: type {1!s}, not dict".format( method_to_typeid, type(method_to_typeid))) exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: # If no external references exist but an internal (to the # manager) still does and a new external reference is created # from it, restore the manager's tracking of it from the # previously stashed internal ref. if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = \ self.id_to_local_proxy_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if ident not in self.id_to_refcount and \ ident in self.id_to_local_proxy_obj: util.debug('Server DECREF skipping %r', ident) return with self.mutex: if self.id_to_refcount[ident] <= 0: raise AssertionError( "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( ident, self.id_to_obj[ident], self.id_to_refcount[ident])) self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: # Two-step process in case the object turns out to contain other # proxy objects (e.g. a managed list of managed lists). # Otherwise, deleting self.id_to_obj[ident] would trigger the # deleting of the stored value (another managed object) which would # in turn attempt to acquire the mutex that is already held here. 
self.id_to_obj[ident] = (None, (), None) # thread-safe util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { #XXX: register dill? 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None, *, shutdown_timeout=1.0): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() self._shutdown_timeout = shutdown_timeout def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client, self._shutdown_timeout), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' # bpo-36368: protect server process from KeyboardInterrupt signals signal.signal(signal.SIGINT, signal.SIG_IGN) if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address 
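        # The send() below is the server-side half of the address handshake
        # set up in BaseManager.start(): the parent created a one-way Pipe,
        # handed the write end to this process, and is blocked in recv() on
        # the read end until the freshly bound address arrives.  A rough
        # standalone sketch of that handshake (illustrative only, not part of
        # this module; the child/address values are made up):
        #
        #     from multiprocess import Pipe, Process
        #
        #     def child(w):
        #         w.send(('127.0.0.1', 50000))   # report e.g. a bound address
        #         w.close()
        #
        #     if __name__ == '__main__':
        #         r, w = Pipe(duplex=False)
        #         p = Process(target=child, args=(w,))
        #         p.start()
        #         w.close()                      # parent keeps only the read end
        #         address = r.recv()
        #         p.join()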
writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, /, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() if self._state.value != State.STARTED: if self._state.value == State.INITIAL: raise ProcessError("Unable to start server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client, shutdown_timeout): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=shutdown_timeout) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=shutdown_timeout) if process.is_alive(): util.info('manager still alive after terminate') process.kill() process.join() state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass @property def address(self): return self._address @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): # isinstance? 
assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, /, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] # Should be set to True only when a proxy object is being created # on the manager server; primary use case: nested proxy objects. # RebuildProxy detects when a proxy is being created on the manager # and sets this value appropriately. 
self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... 
decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = \ server.id_to_obj[token.id] incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, /, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref, manager_owned=manager_owned) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = 
typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): _exposed_ = ('__next__', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self, n=1): return self._callmethod('notify', (n,)) def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) __class_getitem__ = 
classmethod(types.GenericAlias) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) DictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__' )) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocess.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) # # Definition of SharedMemoryManager and SharedMemoryServer # if HAS_SHMEM: class _SharedMemoryTracker: "Manages one or more shared memory segments." def __init__(self, name, segment_names=[]): self.shared_memory_context_name = name self.segment_names = segment_names def register_segment(self, segment_name): "Adds the supplied shared memory block name to tracker." 
util.debug(f"Register segment {segment_name!r} in pid {getpid()}") self.segment_names.append(segment_name) def destroy_segment(self, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the list of blocks being tracked.""" util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") self.segment_names.remove(segment_name) segment = shared_memory.SharedMemory(segment_name) segment.close() segment.unlink() def unlink(self): "Calls destroy_segment() on all tracked shared memory blocks." for segment_name in self.segment_names[:]: self.destroy_segment(segment_name) def __del__(self): util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") self.unlink() def __getstate__(self): return (self.shared_memory_context_name, self.segment_names) def __setstate__(self, state): self.__init__(*state) class SharedMemoryServer(Server): public = Server.public + \ ['track_segment', 'release_segment', 'list_segments'] def __init__(self, *args, **kwargs): Server.__init__(self, *args, **kwargs) address = self.address # The address of Linux abstract namespaces can be bytes if isinstance(address, bytes): address = os.fsdecode(address) self.shared_memory_context = \ _SharedMemoryTracker(f"shm_{address}_{getpid()}") util.debug(f"SharedMemoryServer started by pid {getpid()}") def create(self, c, typeid, /, *args, **kwargs): """Create a new distributed-shared object (not backed by a shared memory block) and return its id to be used in a Proxy Object.""" # Unless set up as a shared proxy, don't make shared_memory_context # a standard part of kwargs. This makes things easier for supplying # simple functions. if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): kwargs['shared_memory_context'] = self.shared_memory_context return Server.create(self, c, typeid, *args, **kwargs) def shutdown(self, c): "Call unlink() on all tracked shared memory, terminate the Server." self.shared_memory_context.unlink() return Server.shutdown(self, c) def track_segment(self, c, segment_name): "Adds the supplied shared memory block name to Server's tracker." self.shared_memory_context.register_segment(segment_name) def release_segment(self, c, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the tracker instance inside the Server.""" self.shared_memory_context.destroy_segment(segment_name) def list_segments(self, c): """Returns a list of names of shared memory blocks that the Server is currently tracking.""" return self.shared_memory_context.segment_names class SharedMemoryManager(BaseManager): """Like SyncManager but uses SharedMemoryServer instead of Server. It provides methods for creating and returning SharedMemory instances and for creating a list-like object (ShareableList) backed by shared memory. It also provides methods that create and return Proxy Objects that support synchronization across processes (i.e. multi-process-safe locks and semaphores). """ _Server = SharedMemoryServer def __init__(self, *args, **kwargs): if os.name == "posix": # bpo-36867: Ensure the resource_tracker is running before # launching the manager process, so that concurrent # shared_memory manipulation both in the manager and in the # current process does not create two resource_tracker # processes. from . 
import resource_tracker resource_tracker.ensure_running() BaseManager.__init__(self, *args, **kwargs) util.debug(f"{self.__class__.__name__} created by pid {getpid()}") def __del__(self): util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") def get_server(self): 'Better than monkeypatching for now; merge into Server ultimately' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started SharedMemoryServer") elif self._state.value == State.SHUTDOWN: raise ProcessError("SharedMemoryManager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self._Server(self._registry, self._address, self._authkey, self._serializer) def SharedMemory(self, size): """Returns a new SharedMemory instance with the specified size in bytes, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sms = shared_memory.SharedMemory(None, create=True, size=size) try: dispatch(conn, None, 'track_segment', (sms.name,)) except BaseException as e: sms.unlink() raise e return sms def ShareableList(self, sequence): """Returns a new ShareableList instance populated with the values from the input sequence, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sl = shared_memory.ShareableList(sequence) try: dispatch(conn, None, 'track_segment', (sl.shm.name,)) except BaseException as e: sl.shm.unlink() raise e return sl uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/pool.py000066400000000000000000000777701455552142400243520ustar00rootroot00000000000000# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import collections import itertools import os import queue import threading import time import traceback import types import warnings # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError from .connection import wait # # Constants representing the state of a pool # INIT = "INIT" RUN = "RUN" CLOSE = "CLOSE" TERMINATE = "TERMINATE" # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): if (maxtasks is not None) and not (isinstance(maxtasks, int) and maxtasks >= 1): raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception and func is not _helper_reraises_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 util.debug('worker exiting after %d tasks' % completed) def _helper_reraises_exception(ex): 'Pickle-able helper function for use by _guarded_task_generation.' raise ex # # Class representing a process pool # class _PoolCache(dict): """ Class that implements a cache for the Pool class that will notify the pool management threads every time the cache is emptied. The notification is done by the use of a queue that is provided when instantiating the cache. """ def __init__(self, /, *args, notifier=None, **kwds): self.notifier = notifier super().__init__(*args, **kwds) def __delitem__(self, item): super().__delitem__(item) # Notify that the cache is empty. This is important because the # pool keeps maintaining workers until the cache gets drained. This # eliminates a race condition in which a task is finished after the # the pool's _handle_workers method has enter another iteration of the # loop. In this situation, the only event that can wake up the pool # is the cache to be emptied (no more tasks available). if not self: self.notifier.put(None) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' _wrap_exception = True @staticmethod def Process(ctx, *args, **kwds): return ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): # Attributes initialized early to make sure that they exist in # __del__() if __init__() raises an exception self._pool = [] self._state = INIT self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.SimpleQueue() # The _change_notifier queue exist to wake up self._handle_workers() # when the cache (self._cache) is empty or when there is a change in # the _state variable of the thread that runs _handle_workers. 
self._change_notifier = self._ctx.SimpleQueue() self._cache = _PoolCache(notifier=self._change_notifier) self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if maxtasksperchild is not None: if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: raise ValueError("maxtasksperchild must be a positive int or None") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes try: self._repopulate_pool() except Exception: for p in self._pool: if p.exitcode is None: p.terminate() for p in self._pool: p.join() raise sentinels = self._get_sentinels() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self._cache, self._taskqueue, self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception, sentinels, self._change_notifier) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._change_notifier, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) self._state = RUN # Copy globals as function locals to make sure that they are available # during Python shutdown when the Pool is destroyed. def __del__(self, _warn=warnings.warn, RUN=RUN): if self._state == RUN: _warn(f"unclosed running multiprocessing pool {self!r}", ResourceWarning, source=self) if getattr(self, '_change_notifier', None) is not None: self._change_notifier.put(None) def __repr__(self): cls = self.__class__ return (f'<{cls.__module__}.{cls.__qualname__} ' f'state={self._state} ' f'pool_size={len(self._pool)}>') def _get_sentinels(self): task_queue_sentinels = [self._outqueue._reader] self_notifier_sentinels = [self._change_notifier._reader] return [*task_queue_sentinels, *self_notifier_sentinels] @staticmethod def _get_worker_sentinels(workers): return [worker.sentinel for worker in workers if hasattr(worker, "sentinel")] @staticmethod def _join_exited_workers(pool): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. 
""" cleaned = False for i in reversed(range(len(pool))): worker = pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del pool[i] return cleaned def _repopulate_pool(self): return self._repopulate_pool_static(self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) @staticmethod def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(processes - len(pool)): w = Process(ctx, target=worker, args=(inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() pool.append(w) util.debug('added worker') @staticmethod def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Clean up any exited workers and start replacements for them. """ if Pool._join_exited_workers(pool): Pool._repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _check_running(self): if self._state != RUN: raise ValueError("Pool not running") def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. Pool must be running. ''' return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def _guarded_task_generation(self, result_job, func, iterable): '''Provides a generator of tasks for imap and imap_unordered with appropriate handling for iterables which throw exceptions during iteration.''' try: i = -1 for i, x in enumerate(iterable): yield (result_job, i, func, (x,), {}) except Exception as e: yield (result_job, i+1, _helper_reraises_exception, (e,), {}) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. 
''' self._check_running() if chunksize == 1: result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0:n}".format( chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. ''' self._check_running() if chunksize == 1: result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0!r}".format(chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' self._check_running() result = ApplyResult(self, callback, error_callback) self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' self._check_running() if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapper, task_batches), None ) ) return result @staticmethod def _wait_for_updates(sentinels, change_notifier, timeout=None): wait(sentinels, timeout=timeout) while not change_notifier.empty(): change_notifier.get() @classmethod def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception, sentinels, change_notifier): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
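# A hedged, comment-only sketch of what this maintenance loop achieves from the
# caller's side -- not part of the original code, and the names used below are
# hypothetical. Because exited workers are reaped here and replaced by
# _repopulate_pool_static(), `maxtasksperchild` yields fresh worker pids over
# the life of the pool:
#
#     import os
#     from multiprocess import Pool
#
#     def worker_pid(_):
#         return os.getpid()
#
#     if __name__ == '__main__':
#         with Pool(processes=2, maxtasksperchild=1) as pool:
#             pids = pool.map(worker_pid, range(6))
#         print(len(set(pids)))   # typically > 2: exited workers were replaced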
while thread._state == RUN or (cache and thread._state != TERMINATE): cls._maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] cls._wait_for_updates(current_sentinels, change_notifier) # send sentinel to stop workers taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None try: # iterating taskseq cannot fail for task in taskseq: if thread._state != RUN: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, idx = task[:2] try: cache[job]._set(idx, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') idx = task[1] if task else -1 set_length(idx + 1) continue break finally: task = taskseq = job = None else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state != RUN: assert thread._state == TERMINATE, "Thread not in TERMINATE" util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE self._change_notifier.put(None) def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') if self._state == RUN: raise ValueError("Pool is still running") elif self._state not in (CLOSE, TERMINATE): raise ValueError("In unknown state") self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') # Notify that the worker_handler state has been changed so the # _handle_workers loop can be unblocked (and exited) in order to # send the finalization sentinel all the workers. worker_handler._state = TERMINATE change_notifier.put(None) task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) if (not result_handler.is_alive()) and (len(cache) != 0): raise AssertionError( "Cannot have cache with result_handler not alive") result_handler._state = TERMINATE change_notifier.put(None) outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): self._check_running() return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, pool, callback, error_callback): self._pool = pool self._event = threading.Event() self._job = next(job_counter) self._cache = pool._cache self._callback = callback self._error_callback = error_callback self._cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): if not self.ready(): raise ValueError("{0!r} not ready".format(self)) return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] self._pool = None __class_getitem__ = classmethod(types.GenericAlias) AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, pool, chunksize, length, callback, error_callback): ApplyResult.__init__(self, pool, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del self._cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): self._number_left -= 1 success, result = success_result if success and self._success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() self._pool = None else: if not success and self._success: # only store first exception self._success = False self._value = result if self._number_left == 0: # only consider the result ready once all jobs are done if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() self._pool = None # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, pool): self._pool = pool self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = pool._cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} self._cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise 
StopIteration from None raise TimeoutError from None success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] self._pool = None def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] self._pool = None # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] self._pool = None # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(ctx, *args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.SimpleQueue() self._outqueue = queue.SimpleQueue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _get_sentinels(self): return [self._change_notifier._reader] @staticmethod def _get_worker_sentinels(workers): return [] @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # drain inqueue, and put sentinels at its head to make workers finish try: while True: inqueue.get(block=False) except queue.Empty: pass for i in range(size): inqueue.put(None) def _wait_for_updates(self, sentinels, change_notifier, timeout): time.sleep(timeout) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/popen_fork.py000066400000000000000000000045061455552142400255260ustar00rootroot00000000000000import os import signal from . import util __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' def __init__(self, process_obj): util._flush_std_streams() self.returncode = None self.finalizer = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except OSError: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: self.returncode = os.waitstatus_to_exitcode(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from multiprocess.connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def _send_signal(self, sig): if self.returncode is None: try: os.kill(self.pid, sig) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def terminate(self): self._send_signal(signal.SIGTERM) def kill(self): self._send_signal(signal.SIGKILL) def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) os.close(parent_w) code = process_obj._bootstrap(parent_sentinel=child_r) finally: os._exit(code) else: os.close(child_w) os.close(child_r) self.finalizer = util.Finalize(self, util.close_fds, (parent_r, parent_w,)) self.sentinel = parent_r def close(self): if self.finalizer is not None: self.finalizer() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/popen_forkserver.py000066400000000000000000000042631455552142400267550ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen if not reduction.HAVE_SEND_HANDLE: raise ImportError('No support for sending fds between processes') from . import forkserver from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) # Keep a duplicate of the data pipe's write end as a sentinel of the # parent process used by the child process. _parent_w = os.dup(w) self.finalizer = util.Finalize(self, util.close_fds, (_parent_w, self.sentinel)) with open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_signed(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from multiprocess.connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_signed(self.sentinel) except (OSError, EOFError): # This should not happen usually, but perhaps the forkserver # process itself got killed self.returncode = 255 return self.returncode uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/popen_spawn_posix.py000066400000000000000000000037551455552142400271440ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): from . 
import resource_tracker tracker_fd = resource_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds) self.sentinel = parent_r with open(parent_w, 'wb', closefd=False) as f: f.write(fp.getbuffer()) finally: fds_to_close = [] for fd in (parent_r, parent_w): if fd is not None: fds_to_close.append(fd) self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) for fd in (child_r, child_w): if fd is not None: os.close(fd) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/popen_spawn_win32.py000066400000000000000000000104011455552142400267260ustar00rootroot00000000000000import os import msvcrt import signal import sys import _winapi from .context import reduction, get_spawning_popen, set_spawning_popen from . import spawn from . import util __all__ = ['Popen'] # # # # Exit code used by Popen.terminate() TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") def _path_eq(p1, p2): return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) WINENV = not _path_eq(sys.executable, sys._base_executable) def _close_handles(*handles): for handle in handles: _winapi.CloseHandle(handle) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' def __init__(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be duplicated by the child process # -- see spawn_main() in spawn.py. # # bpo-33929: Previously, the read end of pipe was "stolen" by the child # process, but it leaked a handle if the child process had been # terminated before it could steal the handle from the parent process. rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) python_exe = spawn.get_executable() # bpo-35797: When running in a venv, we bypass the redirect # executor and launch our base Python. 
if WINENV and _path_eq(python_exe, sys.executable): cmd[0] = python_exe = sys._base_executable env = os.environ.copy() env["__PYVENV_LAUNCHER__"] = sys.executable else: env = None cmd = ' '.join('"%s"' % x for x in cmd) with open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = _winapi.CreateProcess( python_exe, cmd, None, None, False, 0, env, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) self.finalizer = util.Finalize(self, _close_handles, (self.sentinel, int(rhandle))) # send information to child set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: set_spawning_popen(None) def duplicate_for_child(self, handle): assert self is get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except PermissionError: # ERROR_ACCESS_DENIED (winerror 5) is received when the # process already died. code = _winapi.GetExitCodeProcess(int(self._handle)) if code == _winapi.STILL_ACTIVE: raise self.returncode = code else: self.returncode = -signal.SIGTERM kill = terminate def close(self): self.finalizer() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/process.py000066400000000000000000000275451455552142400250520ustar00rootroot00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
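#
# Illustrative sketch (added for documentation only, not part of the original
# module; the helper name `_example_process_usage` and its argument are
# hypothetical): BaseProcess deliberately mirrors threading.Thread, so the
# familiar target/args/start/join pattern applies.
def _example_process_usage(who='world'):
    # Import lazily so this helper has no effect on normal module import.
    from multiprocess import Process

    def _greet(name):
        print('hello,', name)

    # multiprocess pickles with dill, so even a nested function like _greet
    # can typically be shipped to the child process.
    p = Process(target=_greet, args=(who,), name='Greeter')
    p.start()            # fork/spawn the child and run _greet() there
    p.join()             # wait for the child to terminate
    assert p.exitcode == 0 and not p.is_alive()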
# __all__ = ['BaseProcess', 'current_process', 'active_children', 'parent_process'] # # Imports # import os import sys import signal import itertools import threading from _weakrefset import WeakSet # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_children) def parent_process(): ''' Return process object representing the parent process ''' return _parent_process # # # def _cleanup(): # check for processes which have finished for p in list(_children): if (child_popen := p._popen) and child_popen.poll() is not None: _children.discard(p) # # The `Process` class # class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analogous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count,) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._parent_name = _current_process.name self._popen = None self._closed = False self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) if daemon is not None: self.daemon = daemon _dangling.add(self) def _check_closed(self): if self._closed: raise ValueError("process object is closed") def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' self._check_closed() assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._check_closed() self._popen.terminate() def kill(self): ''' Terminate process; sends SIGKILL signal or uses TerminateProcess() ''' self._check_closed() self._popen.kill() def join(self, timeout=None): ''' Wait until child process terminates ''' self._check_closed() assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) def is_alive(self): ''' Return whether process is alive ''' self._check_closed() if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _children.discard(self) return False def close(self): ''' Close the Process object. This method releases resources held by the Process object. 
It is an error to call this method if the child process is still running. ''' if self._popen is not None: if self._popen.poll() is None: raise ValueError("Cannot close a process while it is still running. " "You should first call join() or terminate().") self._popen.close() self._popen = None del self._sentinel _children.discard(self) self._closed = True @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, str), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' self._check_closed() if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' self._check_closed() if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None def __repr__(self): exitcode = None if self is _current_process: status = 'started' elif self._closed: status = 'closed' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: exitcode = self._popen.poll() if exitcode is not None: status = 'stopped' else: status = 'started' info = [type(self).__name__, 'name=%r' % self._name] if self._popen is not None: info.append('pid=%s' % self._popen.pid) info.append('parent=%s' % self._parent_pid) info.append(status) if exitcode is not None: exitcode = _exitcode_to_name.get(exitcode, exitcode) info.append('exitcode=%s' % exitcode) if self.daemon: info.append('daemon') return '<%s>' % ' '.join(info) ## def _bootstrap(self, parent_sentinel=None): from . 
import util, context global _current_process, _parent_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() util._close_stdin() old_process = _current_process _current_process = self _parent_process = _ParentProcess( self._parent_name, self._parent_pid, parent_sentinel) if threading._HAVE_THREAD_NATIVE_ID: threading.main_thread()._set_native_id() try: self._after_fork() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as e: if e.code is None: exitcode = 0 elif isinstance(e.code, int): exitcode = e.code else: sys.stderr.write(str(e.code) + '\n') exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: threading._shutdown() util.info('process exiting with exitcode %d' % exitcode) util._flush_std_streams() return exitcode @staticmethod def _after_fork(): from . import util util._finalizer_registry.clear() util._run_after_forkers() # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the parent process # class _ParentProcess(BaseProcess): def __init__(self, name, pid, sentinel): self._identity = () self._name = name self._pid = pid self._parent_pid = None self._popen = None self._closed = False self._sentinel = sentinel self._config = {} def is_alive(self): from multiprocess.connection import wait return not wait([self._sentinel], timeout=0) @property def ident(self): return self._pid def join(self, timeout=None): ''' Wait until parent process terminates ''' from multiprocess.connection import wait wait([self._sentinel], timeout=timeout) pid = ident # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._closed = False self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} # Note that some versions of FreeBSD only allow named # semaphores to have names of up to 14 characters. Therefore # we choose a short prefix. # # On MacOSX in a sandbox it may be necessary to use a # different prefix -- see #19478. # # Everything in self._config will be inherited by descendant # processes. def close(self): pass _parent_process = None _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in list(signal.__dict__.items()): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = f'-{name}' del name, signum # For debug and leak testing _dangling = WeakSet() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/queues.py000066400000000000000000000305071455552142400246730ustar00rootroot00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
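#
# Illustrative sketch (added for documentation only, not part of the original
# module; the helper name `_example_queue_usage` is hypothetical): Queue is a
# multi-producer, multi-consumer FIFO shared between processes via a pipe,
# a buffer and a feeder thread, driven by the familiar put()/get() calls.
def _example_queue_usage():
    # Imports are local so this helper does not affect normal module import.
    from multiprocess import Process, Queue

    def _produce(q):
        for i in range(3):
            q.put(i * i)      # serialized (with dill) and fed to the pipe
        q.put(None)           # sentinel: tell the consumer we are done

    q = Queue()
    p = Process(target=_produce, args=(q,))
    p.start()
    results = []
    while (item := q.get()) is not None:
        results.append(item)
    p.join()
    return results            # [0, 1, 4]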
# __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import types import weakref import errno from queue import Empty, Full try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import connection from . import context _ForkingPickler = context.reduction.ForkingPickler from .util import debug, info, Finalize, register_after_fork, is_exiting # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0, *, ctx): if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._reset() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._reset() def _after_fork(self): debug('Queue._after_fork()') self._reset(after_fork=True) def _reset(self, after_fork=False): if after_fork: self._notempty._at_fork_reinit() else: self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = getattr(time,'monotonic',time.time)() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - getattr(time,'monotonic',time.time)() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed, "Queue {0!r} not closed".format(self) if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except 
AttributeError: pass def _terminate_broken(self): # Close a Queue on error. # gh-94777: Prevent queue writing to a pipe which is no longer read. self._reader.close() self.close() self.join_thread() def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._reader.close, self._writer.close, self._ignore_epipe, self._on_queue_feeder_error, self._sem), name='QueueFeederThread', daemon=True, ) try: debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') except: # gh-109047: During Python finalization, creating a thread # can fail with RuntimeError. self._thread = None raise if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, reader_close, writer_close, ignore_epipe, onerror, queue_sem): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') reader_close() writer_close() return # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as e: if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: # Since the object has not been sent in the queue, we need # to decrease the size of the queue. The error acts as # if the object had been silently removed from the queue # and this step is necessary to have a properly working # queue. queue_sem.release() onerror(e, obj) @staticmethod def _on_queue_feeder_error(e, obj): """ Private API hook called when feeding data in the background thread raises an exception. For overriding by concurrent.futures. """ import traceback traceback.print_exc() __class_getitem__ = classmethod(types.GenericAlias) _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. 
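#
# Illustrative sketch (added for documentation only, not part of the original
# module; the helper name `_example_joinable_queue` is hypothetical): every
# put() must eventually be balanced by a task_done() from a consumer, which is
# what lets join() return -- and what keeps the unfinished-task semaphore from
# overflowing as warned above.
def _example_joinable_queue():
    from multiprocess import JoinableQueue, Process

    def _consume(q):
        while True:
            item = q.get()
            try:
                if item is None:       # sentinel: no more work
                    break
                _ = item * 2           # "process" the item
            finally:
                q.task_done()          # balance every get() with a task_done()

    q = JoinableQueue()
    worker = Process(target=_consume, args=(q,))
    worker.start()
    for i in range(5):
        q.put(i)
    q.put(None)                        # tell the consumer to stop
    q.join()                           # blocks until every put() is marked done
    worker.join()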
# class JoinableQueue(Queue): def __init__(self, maxsize=0, *, ctx): Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty, self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self, *, ctx): self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() def close(self): self._reader.close() self._writer.close() def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._poll = self._reader.poll def get(self): with self._rlock: res = self._reader.recv_bytes() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def put(self, obj): # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(obj) else: with self._wlock: self._writer.send_bytes(obj) __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/reduction.py000066400000000000000000000226451455552142400253640ustar00rootroot00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from abc import ABCMeta import copyreg import functools import io import os try: import dill as pickle except ImportError: import pickle import socket import sys from . 
import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocess.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None, *args, **kwds): buf = io.BytesIO() cls(buf, protocol, *args, **kwds).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None, *args, **kwds): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol, *args, **kwds).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False, *, source_process=None): '''Duplicate a handle. (target_process is a handle not a pid!)''' current_process = _winapi.GetCurrentProcess() if source_process is None: source_process = current_process if target_process is None: target_process = current_process return _winapi.DuplicateHandle( source_process, handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) if len(a) % 256 != msg[0]: raise AssertionError( "Len is {0:n} but msg[0] is {1!r}".format( len(a), msg[0])) return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) class AbstractReducer(metaclass=ABCMeta): '''Abstract base class for use in implementing a Reduction class suitable for use in replacing the standard reduction mechanism used in multiprocess.''' ForkingPickler = ForkingPickler register = register dump = dump send_handle = send_handle recv_handle = recv_handle if sys.platform == 'win32': steal_handle = steal_handle duplicate = duplicate DupHandle = DupHandle else: sendfds = sendfds recvfds = recvfds DupFd = DupFd _reduce_method = _reduce_method _reduce_method_descriptor = _reduce_method_descriptor _rebuild_partial = _rebuild_partial _reduce_socket = _reduce_socket _rebuild_socket = _rebuild_socket def __init__(self, *args): register(type(_C().f), _reduce_method) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) register(functools.partial, _reduce_partial) register(socket.socket, _reduce_socket) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/resource_sharer.py000066400000000000000000000120141455552142400265500ustar00rootroot00000000000000# # We use a background thread for sharing fds on Unix, and for sharing sockets on # Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. # import os import signal import socket import sys import threading from . import process from .context import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. 
This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resources using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() self._lock._at_fork_reinit() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None, "Already have Listener" util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/resource_tracker.py000066400000000000000000000243211455552142400267230ustar00rootroot00000000000000############################################################################### # Server process to keep track of unlinked resources (like shared memory # segments, semaphores etc.) and clean them. # # On Unix we run a server process which keeps track of unlinked # resources. The server ignores SIGINT and SIGTERM and reads from a # pipe. 
Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining resource names. # # This is important because there may be system limits for such resources: for # instance, the system only supports a limited number of named semaphores, and # shared-memory segments live in the RAM. If a python process leaks such a # resource, this resource will not be removed till the next reboot. Without # this resource tracker process, "killall python" would probably leave unlinked # resources. import os import signal import sys import threading import warnings from . import spawn from . import util __all__ = ['ensure_running', 'register', 'unregister'] _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) _CLEANUP_FUNCS = { 'noop': lambda: None, } if os.name == 'posix': try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import _posixshmem # Use sem_unlink() to clean up named semaphores. # # sem_unlink() may be missing if the Python build process detected the # absence of POSIX named semaphores. In that case, no named semaphores were # ever opened, so no cleanup would be necessary. if hasattr(_multiprocessing, 'sem_unlink'): _CLEANUP_FUNCS.update({ 'semaphore': _multiprocessing.sem_unlink, }) _CLEANUP_FUNCS.update({ 'shared_memory': _posixshmem.shm_unlink, }) class ReentrantCallError(RuntimeError): pass class ResourceTracker(object): def __init__(self): self._lock = threading.RLock() self._fd = None self._pid = None def _reentrant_call_error(self): # gh-109629: this happens if an explicit call to the ResourceTracker # gets interrupted by a garbage collection, invoking a finalizer (*) # that itself calls back into ResourceTracker. # (*) for example the SemLock finalizer raise ReentrantCallError( "Reentrant call into the multiprocessing resource tracker") def _stop(self): with self._lock: # This should not happen (_stop() isn't called by a finalizer) # but we check for it anyway. if getattr(self._lock, "_recursion_count", int)() > 1: return self._reentrant_call_error() if self._fd is None: # not running return # closing the "alive" file descriptor stops main() os.close(self._fd) self._fd = None os.waitpid(self._pid, 0) self._pid = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that resource tracker process is running. This can be run from any process. Usually a child process will use the resource created by its parent.''' with self._lock: if getattr(self._lock, "_recursion_count", int)() > 1: # The code below is certainly not reentrant-safe, so bail out return self._reentrant_call_error() if self._fd is not None: # resource tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) # Clean-up to avoid dangling processes. try: # _pid can be None if this process is a child from another # python process, which has started the resource_tracker. if self._pid is not None: os.waitpid(self._pid, 0) except ChildProcessError: # The resource_tracker has already been terminated. pass self._fd = None self._pid = None warnings.warn('resource_tracker: process died unexpectedly, ' 'relaunching. 
Some resources might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from multiprocess.resource_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] # bpo-33613: Register a signal mask that will block the signals. # This signal mask will be inherited by the child that is going # to be spawned and will protect the child from a race condition # that can make the child die before it registers signal handlers # for SIGINT and SIGTERM. The mask is unregistered after spawning # the child. try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = util.spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r) def _check_alive(self): '''Check that the pipe has not been closed by sending a probe.''' try: # We cannot use send here as it calls ensure_running, creating # a cycle. os.write(self._fd, b'PROBE:0:noop\n') except OSError: return False else: return True def register(self, name, rtype): '''Register name of resource with resource tracker.''' self._send('REGISTER', name, rtype) def unregister(self, name, rtype): '''Unregister name of resource with resource tracker.''' self._send('UNREGISTER', name, rtype) def _send(self, cmd, name, rtype): try: self.ensure_running() except ReentrantCallError: # The code below might or might not work, depending on whether # the resource tracker was already running and still alive. # Better warn the user. # (XXX is warnings.warn itself reentrant-safe? :-) warnings.warn( f"ResourceTracker called reentrantly for resource cleanup, " f"which is unsupported. 
" f"The {rtype} object {name!r} might leak.") msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') if len(msg) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('msg too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( nbytes, len(msg)) _resource_tracker = ResourceTracker() ensure_running = _resource_tracker.ensure_running register = _resource_tracker.register unregister = _resource_tracker.unregister getfd = _resource_tracker.getfd def main(fd): '''Run resource tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} try: # keep track of registered/unregistered resources with open(fd, 'rb') as f: for line in f: try: cmd, name, rtype = line.strip().decode('ascii').split(':') cleanup_func = _CLEANUP_FUNCS.get(rtype, None) if cleanup_func is None: raise ValueError( f'Cannot register {name} for automatic cleanup: ' f'unknown resource type {rtype}') if cmd == 'REGISTER': cache[rtype].add(name) elif cmd == 'UNREGISTER': cache[rtype].remove(name) elif cmd == 'PROBE': pass else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining resources for rtype, rtype_cache in cache.items(): if rtype_cache: try: warnings.warn('resource_tracker: There appear to be %d ' 'leaked %s objects to clean up at shutdown' % (len(rtype_cache), rtype)) except Exception: pass for name in rtype_cache: # For some reason the process which created and registered this # resource has failed to unregister it. Presumably it has # died. We therefore unlink it. try: try: _CLEANUP_FUNCS[rtype](name) except Exception as e: warnings.warn('resource_tracker: %r: %s' % (name, e)) finally: pass uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/shared_memory.py000066400000000000000000000440321455552142400262200ustar00rootroot00000000000000"""Provides shared memory for direct access across processes. The API of this package is currently provisional. Refer to the documentation for details. """ __all__ = [ 'SharedMemory', 'ShareableList' ] from functools import partial import mmap import os import errno import struct import secrets import types if os.name == "nt": import _winapi _USE_POSIX = False else: import _posixshmem _USE_POSIX = True from . import resource_tracker _O_CREX = os.O_CREAT | os.O_EXCL # FreeBSD (and perhaps other BSDs) limit names to 14 characters. _SHM_SAFE_NAME_LENGTH = 14 # Shared memory block name prefix if _USE_POSIX: _SHM_NAME_PREFIX = '/psm_' else: _SHM_NAME_PREFIX = 'wnsm_' def _make_filename(): "Create a random filename for the shared memory object." # number of random bytes to use for name nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 assert nbytes >= 2, '_SHM_NAME_PREFIX too long' name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) assert len(name) <= _SHM_SAFE_NAME_LENGTH return name class SharedMemory: """Creates a new shared memory block or attaches to an existing shared memory block. Every shared memory block is assigned a unique name. 
This enables one process to create a shared memory block with a particular name so that a different process can attach to that same shared memory block using that same name. As a resource for sharing data across processes, shared memory blocks may outlive the original process that created them. When one process no longer needs access to a shared memory block that might still be needed by other processes, the close() method should be called. When a shared memory block is no longer needed by any process, the unlink() method should be called to ensure proper cleanup.""" # Defaults; enables close() and unlink() to run without errors. _name = None _fd = -1 _mmap = None _buf = None _flags = os.O_RDWR _mode = 0o600 _prepend_leading_slash = True if _USE_POSIX else False def __init__(self, name=None, create=False, size=0): if not size >= 0: raise ValueError("'size' must be a positive integer") if create: self._flags = _O_CREX | os.O_RDWR if size == 0: raise ValueError("'size' must be a positive number different from zero") if name is None and not self._flags & os.O_EXCL: raise ValueError("'name' can only be None if create=True") if _USE_POSIX: # POSIX Shared Memory if name is None: while True: name = _make_filename() try: self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) except FileExistsError: continue self._name = name break else: name = "/" + name if self._prepend_leading_slash else name self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) self._name = name try: if create and size: os.ftruncate(self._fd, size) stats = os.fstat(self._fd) size = stats.st_size self._mmap = mmap.mmap(self._fd, size) except OSError: self.unlink() raise resource_tracker.register(self._name, "shared_memory") else: # Windows Named Shared Memory if create: while True: temp_name = _make_filename() if name is None else name # Create and reserve shared memory block with this name # until it can be attached to by mmap. h_map = _winapi.CreateFileMapping( _winapi.INVALID_HANDLE_VALUE, _winapi.NULL, _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF, size & 0xFFFFFFFF, temp_name ) try: last_error_code = _winapi.GetLastError() if last_error_code == _winapi.ERROR_ALREADY_EXISTS: if name is not None: raise FileExistsError( errno.EEXIST, os.strerror(errno.EEXIST), name, _winapi.ERROR_ALREADY_EXISTS ) else: continue self._mmap = mmap.mmap(-1, size, tagname=temp_name) finally: _winapi.CloseHandle(h_map) self._name = temp_name break else: self._name = name # Dynamically determine the existing named shared memory # block's size which is likely a multiple of mmap.PAGESIZE. h_map = _winapi.OpenFileMapping( _winapi.FILE_MAP_READ, False, name ) try: p_buf = _winapi.MapViewOfFile( h_map, _winapi.FILE_MAP_READ, 0, 0, 0 ) finally: _winapi.CloseHandle(h_map) try: size = _winapi.VirtualQuerySize(p_buf) finally: _winapi.UnmapViewOfFile(p_buf) self._mmap = mmap.mmap(-1, size, tagname=name) self._size = size self._buf = memoryview(self._mmap) def __del__(self): try: self.close() except OSError: pass def __reduce__(self): return ( self.__class__, ( self.name, False, self.size, ), ) def __repr__(self): return f'{self.__class__.__name__}({self.name!r}, size={self.size})' @property def buf(self): "A memoryview of contents of the shared memory block." return self._buf @property def name(self): "Unique name that identifies the shared memory block." 
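        # A minimal usage sketch (illustration only, not part of the module
        # above): the reported name is what a second process passes back in
        # order to attach to the same block, e.g.
        #     shm = SharedMemory(create=True, size=16)    # creating process
        #     other = SharedMemory(name=shm.name)         # attaching process
        #     other.close(); shm.close(); shm.unlink()    # cleanup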
reported_name = self._name if _USE_POSIX and self._prepend_leading_slash: if self._name.startswith("/"): reported_name = self._name[1:] return reported_name @property def size(self): "Size in bytes." return self._size def close(self): """Closes access to the shared memory from this instance but does not destroy the shared memory block.""" if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() self._mmap = None if _USE_POSIX and self._fd >= 0: os.close(self._fd) self._fd = -1 def unlink(self): """Requests that the underlying shared memory block be destroyed. In order to ensure proper cleanup of resources, unlink should be called once (and only once) across all processes which have access to the shared memory block.""" if _USE_POSIX and self._name: _posixshmem.shm_unlink(self._name) resource_tracker.unregister(self._name, "shared_memory") _encoding = "utf8" class ShareableList: """Pattern for a mutable list-like object shareable via a shared memory block. It differs from the built-in list type in that these lists can not change their overall length (i.e. no append, insert, etc.) Because values are packed into a memoryview as bytes, the struct packing format for any storable value must require no more than 8 characters to describe its format.""" # The shared memory area is organized as follows: # - 8 bytes: number of items (N) as a 64-bit integer # - (N + 1) * 8 bytes: offsets of each element from the start of the # data area # - K bytes: the data area storing item values (with encoding and size # depending on their respective types) # - N * 8 bytes: `struct` format string for each element # - N bytes: index into _back_transforms_mapping for each element # (for reconstructing the corresponding Python value) _types_mapping = { int: "q", float: "d", bool: "xxxxxxx?", str: "%ds", bytes: "%ds", None.__class__: "xxxxxx?x", } _alignment = 8 _back_transforms_mapping = { 0: lambda value: value, # int, float, bool 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str 2: lambda value: value.rstrip(b'\x00'), # bytes 3: lambda _value: None, # None } @staticmethod def _extract_recreation_code(value): """Used in concert with _back_transforms_mapping to convert values into the appropriate Python objects when retrieving them from the list as well as when storing them.""" if not isinstance(value, (str, bytes, None.__class__)): return 0 elif isinstance(value, str): return 1 elif isinstance(value, bytes): return 2 else: return 3 # NoneType def __init__(self, sequence=None, *, name=None): if name is None or sequence is not None: sequence = sequence or () _formats = [ self._types_mapping[type(item)] if not isinstance(item, (str, bytes)) else self._types_mapping[type(item)] % ( self._alignment * (len(item) // self._alignment + 1), ) for item in sequence ] self._list_len = len(_formats) assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len offset = 0 # The offsets of each list element into the shared memory's # data area (0 meaning the start of the data area, not the start # of the shared memory area). 
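            # A worked sketch of the scheme described above (illustration
            # only): for ShareableList([3, 'hello']) the int packs as 'q'
            # (8 bytes) and the 5-byte string is padded up to the 8-byte
            # alignment, giving '8s', so the offsets built below would be
            # [0, 8, 16].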
self._allocated_offsets = [0] for fmt in _formats: offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) self._allocated_offsets.append(offset) _recreation_codes = [ self._extract_recreation_code(item) for item in sequence ] requested_size = struct.calcsize( "q" + self._format_size_metainfo + "".join(_formats) + self._format_packing_metainfo + self._format_back_transform_codes ) self.shm = SharedMemory(name, create=True, size=requested_size) else: self.shm = SharedMemory(name) if sequence is not None: _enc = _encoding struct.pack_into( "q" + self._format_size_metainfo, self.shm.buf, 0, self._list_len, *(self._allocated_offsets) ) struct.pack_into( "".join(_formats), self.shm.buf, self._offset_data_start, *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) ) struct.pack_into( self._format_packing_metainfo, self.shm.buf, self._offset_packing_formats, *(v.encode(_enc) for v in _formats) ) struct.pack_into( self._format_back_transform_codes, self.shm.buf, self._offset_back_transform_codes, *(_recreation_codes) ) else: self._list_len = len(self) # Obtains size from offset 0 in buffer. self._allocated_offsets = list( struct.unpack_from( self._format_size_metainfo, self.shm.buf, 1 * 8 ) ) def _get_packing_format(self, position): "Gets the packing format for a single value stored in the list." position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") v = struct.unpack_from( "8s", self.shm.buf, self._offset_packing_formats + position * 8 )[0] fmt = v.rstrip(b'\x00') fmt_as_str = fmt.decode(_encoding) return fmt_as_str def _get_back_transform(self, position): "Gets the back transformation function for a single value." if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") transform_code = struct.unpack_from( "b", self.shm.buf, self._offset_back_transform_codes + position )[0] transform_function = self._back_transforms_mapping[transform_code] return transform_function def _set_packing_format_and_transform(self, position, fmt_as_str, value): """Sets the packing format and back transformation code for a single value in the list at the specified position.""" if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") struct.pack_into( "8s", self.shm.buf, self._offset_packing_formats + position * 8, fmt_as_str.encode(_encoding) ) transform_code = self._extract_recreation_code(value) struct.pack_into( "b", self.shm.buf, self._offset_back_transform_codes + position, transform_code ) def __getitem__(self, position): position = position if position >= 0 else position + self._list_len try: offset = self._offset_data_start + self._allocated_offsets[position] (v,) = struct.unpack_from( self._get_packing_format(position), self.shm.buf, offset ) except IndexError: raise IndexError("index out of range") back_transform = self._get_back_transform(position) v = back_transform(v) return v def __setitem__(self, position, value): position = position if position >= 0 else position + self._list_len try: item_offset = self._allocated_offsets[position] offset = self._offset_data_start + item_offset current_format = self._get_packing_format(position) except IndexError: raise IndexError("assignment index out of range") if not isinstance(value, (str, bytes)): new_format = self._types_mapping[type(value)] encoded_value = value else: allocated_length = self._allocated_offsets[position + 
1] - item_offset encoded_value = (value.encode(_encoding) if isinstance(value, str) else value) if len(encoded_value) > allocated_length: raise ValueError("bytes/str item exceeds available storage") if current_format[-1] == "s": new_format = current_format else: new_format = self._types_mapping[str] % ( allocated_length, ) self._set_packing_format_and_transform( position, new_format, value ) struct.pack_into(new_format, self.shm.buf, offset, encoded_value) def __reduce__(self): return partial(self.__class__, name=self.shm.name), () def __len__(self): return struct.unpack_from("q", self.shm.buf, 0)[0] def __repr__(self): return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' @property def format(self): "The struct packing format used by all currently stored items." return "".join( self._get_packing_format(i) for i in range(self._list_len) ) @property def _format_size_metainfo(self): "The struct packing format used for the items' storage offsets." return "q" * (self._list_len + 1) @property def _format_packing_metainfo(self): "The struct packing format used for the items' packing formats." return "8s" * self._list_len @property def _format_back_transform_codes(self): "The struct packing format used for the items' back transforms." return "b" * self._list_len @property def _offset_data_start(self): # - 8 bytes for the list length # - (N + 1) * 8 bytes for the element offsets return (self._list_len + 2) * 8 @property def _offset_packing_formats(self): return self._offset_data_start + self._allocated_offsets[-1] @property def _offset_back_transform_codes(self): return self._offset_packing_formats + self._list_len * 8 def count(self, value): "L.count(value) -> integer -- return number of occurrences of value." return sum(value == entry for entry in self) def index(self, value): """L.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present.""" for position, entry in enumerate(self): if value == entry: return position else: raise ValueError(f"{value!r} not in this container") __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/sharedctypes.py000066400000000000000000000142421455552142400260600ustar00rootroot00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import ctypes import weakref from . import heap from . 
import get_context from .context import reduction, assert_spawning _ForkingPickler = reduction.ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=True, ctx=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): ''' Return a synchronization wrapper for a RawArray ''' obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = {name: make_property(name) for name in names} classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, ctx) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length _ForkingPickler.register(type_, reduce_ctype) buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} 
exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/spawn.py000066400000000000000000000226511455552142400245150ustar00rootroot00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import sys import runpy import types from . import get_start_method, set_start_method from . import process from .context import reduction from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
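# For example, an application embedding Python might redirect the spawn
# machinery to a real interpreter before starting children (an illustrative
# sketch only; the interpreter path shown here is made up):
#
#   import multiprocess as mp
#   mp.set_executable('/opt/python/bin/python3')   # used to launch children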
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = getattr(sys, 'frozen', False) WINSERVICE = sys.executable and sys.executable.lower().endswith("pythonservice.exe") def set_executable(exe): global _python_exe if exe is None: _python_exe = exe elif sys.platform == 'win32': _python_exe = os.fsdecode(exe) else: _python_exe = os.fsencode(exe) def get_executable(): return _python_exe if WINSERVICE: set_executable(os.path.join(sys.exec_prefix, 'python.exe')) else: set_executable(sys.executable) # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--multiprocessing-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() exe = get_executable() return [exe] + opts + ['-c', prog, '--multiprocessing-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv), "Not forking" if sys.platform == 'win32': import msvcrt import _winapi if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid) else: source_process = None new_handle = reduction.duplicate(pipe_handle, source_process=source_process) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process else: from . import resource_tracker resource_tracker._resource_tracker._fd = tracker_fd fd = pipe_handle parent_sentinel = os.dup(pipe_handle) exitcode = _main(fd, parent_sentinel) sys.exit(exitcode) def _main(fd, parent_sentinel): with os.fdopen(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = reduction.pickle.load(from_parent) prepare(preparation_data) self = reduction.pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap(parent_sentinel) def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable. 
To fix this issue, refer to the "Safe importing of main module" section in https://docs.python.org/3/library/multiprocessing.html ''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path=sys.path.copy() try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] main_mod_name = getattr(main_module.__spec__, "name", None) if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. 
We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/synchronize.py000066400000000000000000000303321455552142400257330ustar00rootroot00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import sys import tempfile try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import time from . import context from . import process from . import util # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
# See issue 3770 try: from _multiprocess import SemLock, sem_unlink except ImportError: try: from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, *, ctx): if ctx is None: ctx = context._default_context.get_context() self._is_fork_ctx = ctx.get_start_method() == 'fork' unlink_now = sys.platform == 'win32' or self._is_fork_ctx for i in range(100): try: sl = self._semlock = _multiprocessing.SemLock( kind, value, maxvalue, self._make_name(), unlink_now) except FileExistsError: pass else: break else: raise FileExistsError('cannot find name for semaphore') util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if self._semlock.name is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .resource_tracker import register register(self._semlock.name, "semaphore") util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .resource_tracker import unregister sem_unlink(name) unregister(name, "semaphore") def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: if self._is_fork_ctx: raise RuntimeError('A SemLock created in a fork context is being ' 'shared with a process in a spawn context. This is ' 'not supported. Please use the same context to create ' 'multiprocess objects and Process.') h = sl.handle return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() # Ensure that deserialized SemLock can be serialized again (gh-108520). 
self._is_fork_ctx = False @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % \ (self.__class__.__name__, value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) # # Recursive lock # class RLock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None, *, ctx): self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def 
notify(self, n=1): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire( False), ('notify: Should not have been able to acquire ' + '_wait_semaphore') # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res, ('notify: Bug in sleeping_count.acquire' + '- res should not be False') sleepers = 0 while sleepers < n and self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def notify_all(self): self.notify(n=sys.maxsize) def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result # # Event # class Event(object): def __init__(self, *, ctx): self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False def __repr__(self) -> str: set_status = 'set' if self.is_set() else 'unset' return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>" # # Barrier # class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, *, ctx): import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): self._array[1] = value uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/000077500000000000000000000000001455552142400241475ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/__init__.py000066400000000000000000006315261455552142400262750ustar00rootroot00000000000000# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import textwrap import time import io import itertools import sys import os import gc import errno import functools import signal import array import socket import random import logging import subprocess import struct import operator import pathlib import pickle #XXX: use dill? 
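# A minimal sketch of how the synchronization primitives defined above are
# normally obtained in user code, i.e. through a context rather than by
# instantiating the classes directly (illustration only):
#
#   import multiprocess as mp
#   ctx = mp.get_context('spawn')
#   lock, ev = ctx.Lock(), ctx.Event()
#   with lock:        # SemLock-backed, works across processes
#       ev.set()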
import weakref import warnings import test.support import test.support.script_helper from test import support from test.support import hashlib_helper from test.support import import_helper from test.support import os_helper from test.support import script_helper from test.support import socket_helper from test.support import threading_helper from test.support import warnings_helper # Skip tests if _multiprocessing wasn't built. _multiprocessing = import_helper.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. import_helper.import_module('multiprocess.synchronize') import threading import multiprocess as multiprocessing import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess.connection import wait, AuthenticationError from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocess import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None if hasattr(support,'HAVE_ASAN_FORK_BUG') and support.HAVE_ASAN_FORK_BUG: # gh-89363: Skip multiprocessing tests if Python is built with ASAN to # work around a libasan race condition: dead lock in pthread_create(). raise unittest.SkipTest("libasan has a pthread_create() dead lock related to thread+fork") # Don't ignore user's installed packages ENV = dict(__cleanenv = False, __isolated = False) # Timeout to wait until a process completes #XXX: travis-ci TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds # gh-110666: Tolerate a difference of 100 ms when comparing timings # (clock resolution) CLOCK_RES = 0.100 def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused threading_helper.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocess import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 # BaseManager.shutdown_timeout SHUTDOWN_TIMEOUT = support.SHORT_TIMEOUT WAIT_ACTIVE_CHILDREN_TIMEOUT = 5.0 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def 
check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." % nsems_min) def only_run_in_spawn_testsuite(reason): """Returns a decorator: raises SkipTest when SM != spawn at test time. This can be useful to save overall Python test suite execution time. "spawn" is the universal mode available on all platforms so this limits the decorated test to only execute within test_multiprocessing_spawn. This would not be necessary if we refactored our test suite to split things into other test files when they are not start method specific to be rerun under all start methods. """ def decorator(test_item): @functools.wraps(test_item) def spawn_check_wrapper(*args, **kwargs): if (start_method := multiprocessing.get_start_method()) != "spawn": raise unittest.SkipTest(f"{start_method=}, not 'spawn'; {reason}") return test_item(*args, **kwargs) return spawn_check_wrapper return decorator class TestInternalDecorators(unittest.TestCase): """Logic within a test suite that could errantly skip tests? Test it!""" @unittest.skipIf(sys.platform == "win32", "test requires that fork exists.") def test_only_run_in_spawn_testsuite(self): if multiprocessing.get_start_method() != "spawn": raise unittest.SkipTest("only run in test_multiprocessing_spawn.") try: @only_run_in_spawn_testsuite("testing this decorator") def return_four_if_spawn(): return 4 except Exception as err: self.fail(f"expected decorated `def` not to raise; caught {err}") orig_start_method = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method("spawn", force=True) self.assertEqual(return_four_if_spawn(), 4) multiprocessing.set_start_method("fork", force=True) with self.assertRaises(unittest.SkipTest) as ctx: return_four_if_spawn() self.assertIn("testing this decorator", str(ctx.exception)) self.assertIn("start_method=", str(ctx.exception)) finally: multiprocessing.set_start_method(orig_start_method, force=True) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.monotonic() try: return self.func(*args, **kwds) finally: self.elapsed = time.monotonic() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. 
def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_set_executable(self): if self.TYPE == 'threads': self.skipTest(f'test not appropriate for {self.TYPE}') paths = [ sys.executable, # str sys.executable.encode(), # bytes pathlib.Path(sys.executable) # os.PathLike ] for path in paths: self.set_executable(path) p = self.Process() p.start() p.join() self.assertEqual(p.exitcode, 0) @support.requires_resource('cpu') def test_args_argument(self): # bpo-45735: Using list or tuple as *args* in constructor could # achieve the same effect. args_cases = (1, "str", [1], (1,)) args_types = (list, tuple) test_cases = itertools.product(args_cases, args_types) for args, args_type in test_cases: with self.subTest(args=args, args_type=args_type): q = self.Queue(1) # pass a tuple or list as args p = self.Process(target=self._test_args, args=args_type((q, args))) p.daemon = True p.start() child_args = q.get() self.assertEqual(child_args, args) p.join() close_queue(q) @classmethod def _test_args(cls, q, arg): q.put(arg) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocess.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def _test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
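        # For reference (illustration only), the API exercised here is just:
        #     from multiprocess.process import parent_process
        #     parent_process()    # returns None in the main process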
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocess.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=support.SHORT_TIMEOUT) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. 
# We use alarm() to interrupt it if it blocks for too long. def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) else: self.assertEqual(exitcode, -signal.SIGTERM) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) @support.requires_resource('walltime') def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() travis = os.environ.get('COVERAGE') #XXX: travis-ci N = (1 if travis else 5) if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: 
p.start() for p in procs: join_process(p) for p in procs: self.assertEqual(p.exitcode, 0) procs = [self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.001) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() gc.collect() # For PyPy or other GCs. self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(os_helper.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) 
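# ----------------------------------------------------------------------
# Illustrative sketch (not part of the test suite and never invoked by
# it): a minimal, hedged example of the Process lifecycle exercised by
# the tests above -- the daemon flag, start()/join(), exitcode and
# close().  The helper and its names are hypothetical; it assumes only
# the public multiprocess API.
# ----------------------------------------------------------------------

def _example_process_lifecycle():
    import time
    import multiprocess as mp

    def _worker():
        time.sleep(0.1)        # stand-in for real work

    p = mp.Process(target=_worker, name='ExampleWorker')
    p.daemon = True            # daemonic children do not block interpreter exit
    p.start()                  # exitcode stays None while the child runs
    assert p.is_alive()
    p.join()                   # wait for a normal exit
    assert p.exitcode == 0     # 0 signals a clean exit
    p.close()                  # after close(), is_alive()/join() raise ValueError

# With the spawn or forkserver start methods, call the sketch from an
# ``if __name__ == '__main__'`` guard so the child can safely re-import
# the defining module.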
@classmethod def _sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, encoding="utf-8") as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("__init__.py", err) #self.assertIn("1/0 # MARKER", err) #FIXME @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) 
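            # sys.exit() with a non-integer argument writes that argument to
            # sys.stderr (redirected to testfn by _test_sys_exit) and exits
            # with status 1, which the next two assertions verify.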
self.assertEqual(p.exitcode, 1) with open(testfn, encoding="utf-8") as f: content = f.read() self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) cases = [ ((True,), 1), ((False,), 0), ((8,), 8), ((None,), 0), ((), 0), ] for args, expected in cases: with self.subTest(args=args): p = self.Process(target=sys.exit, args=args) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, expected) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) 
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with os_helper.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w', encoding="utf-8") as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with import_helper.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = time.monotonic() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = time.monotonic() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once # failed because the delta was only 135.8 ms. self.assertGreaterEqual(delta, 0.100) close_queue(q) def test_queue_feeder_donot_stop_onexc(self): # bpo-30414: verify feeder handles exceptions correctly if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): def __reduce__(self): raise AttributeError with test.support.captured_stderr(): q = self.Queue() q.put(NotSerializable()) q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) close_queue(q) with test.support.captured_stderr(): # bpo-33078: verify that the queue size is correctly handled # on errors. 
q = self.Queue(maxsize=1) q.put(NotSerializable()) q.put(True) try: self.assertEqual(q.qsize(), 1) except NotImplementedError: # qsize is not available on all platform as it # relies on sem_getvalue pass self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Check that the size of the queue is correct self.assertTrue(q.empty()) close_queue(q) def test_queue_feeder_on_queue_feeder_error(self): # bpo-30006: verify feeder handles exceptions using the # _on_queue_feeder_error hook. if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): """Mock unserializable object""" def __init__(self): self.reduce_was_called = False self.on_queue_feeder_error_was_called = False def __reduce__(self): self.reduce_was_called = True raise AttributeError class SafeQueue(multiprocessing.queues.Queue): """Queue with overloaded _on_queue_feeder_error hook""" @staticmethod def _on_queue_feeder_error(e, obj): if (isinstance(e, AttributeError) and isinstance(obj, NotSerializable)): obj.on_queue_feeder_error_was_called = True not_serializable_obj = NotSerializable() # The captured_stderr reduces the noise in the test report with test.support.captured_stderr(): q = SafeQueue(ctx=multiprocessing.get_context()) q.put(not_serializable_obj) # Verify that q is still functioning correctly q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Assert that the serialization and the hook have been called correctly self.assertTrue(not_serializable_obj.reduce_was_called) self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) def test_closed_queue_put_get_exceptions(self): for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): q.close() with self.assertRaisesRegex(ValueError, 'is closed'): q.put('foo') with self.assertRaisesRegex(ValueError, 'is closed'): q.get() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # 
self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def assertReachesEventually(self, func, value): for i in range(10): try: if func() == value: break except NotImplementedError: break time.sleep(DELTA) time.sleep(DELTA) self.assertReturnsIfImplemented(value, func) def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() 
cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.100 dt = time.monotonic() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = time.monotonic() - dt if not result and (expected - CLOCK_RES) <= dt: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
        for i in range(3):
            time.sleep(0.010)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertTrue(success.value)

    @classmethod
    def _test_wait_result(cls, c, pid):
        with c:
            c.notify()
        time.sleep(1)
        if pid is not None:
            os.kill(pid, signal.SIGINT)

    def test_wait_result(self):
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            self.assertTrue(c.wait(60))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 60)

            p.join()


class _TestEvent(BaseTestCase):

    @classmethod
    def _test_event(cls, event):
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()

    def test_repr(self) -> None:
        event = self.Event()
        if self.TYPE == 'processes':
            self.assertRegex(repr(event), r"<Event at .* unset>")
            event.set()
            self.assertRegex(repr(event), r"<Event at .* set>")
            event.clear()
            self.assertRegex(repr(event), r"<Event at .* unset>")
        elif self.TYPE == 'manager':
            self.assertRegex(repr(event), r"<EventProxy object, typeid 'Event' at .*")
            event.set()
            self.assertRegex(repr(event), r"<EventProxy object, typeid 'Event' at .*")


class _TestConnection(BaseTestCase):

    ALLOWED_TYPES = ('processes', 'threads')

    def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657)
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "wb") as f:
            fd = f.fileno()
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
        p.join()
        with open(os_helper.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar")

    @classmethod
    def _send_data_without_fd(self, conn):
        os.write(conn.fileno(), b"\0")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
    def test_missing_fd_transfer(self):
        # Check that exception is raised when received data is not
        # accompanied by a file descriptor in ancillary data.
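        # _send_data_without_fd() writes a bare byte with os.write(), so no
        # SCM_RIGHTS ancillary payload accompanies it and recv_handle() must
        # fail with RuntimeError.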
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
        p.daemon = True
        p.start()
        self.assertRaises(RuntimeError, reduction.recv_handle, conn)
        p.join()

    def test_context(self):
        a, b = self.Pipe()

        with a, b:
            a.send(1729)
            self.assertEqual(b.recv(), 1729)
            if self.TYPE == 'processes':
                self.assertFalse(a.closed)
                self.assertFalse(b.closed)

        if self.TYPE == 'processes':
            self.assertTrue(a.closed)
            self.assertTrue(b.closed)
            self.assertRaises(OSError, a.recv)
            self.assertRaises(OSError, b.recv)


class _TestListener(BaseTestCase):

    ALLOWED_TYPES = ('processes',)

    def test_multiple_bind(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            self.addCleanup(l.close)
            self.assertRaises(OSError, self.connection.Listener,
                              l.address, family)

    def test_context(self):
        with self.connection.Listener() as l:
            with self.connection.Client(l.address) as c:
                with l.accept() as d:
                    c.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            self.assertRaises(OSError, l.accept)

    @unittest.skipUnless(util.abstract_sockets_supported,
                         "test needs abstract socket support")
    def test_abstract_socket(self):
        with self.connection.Listener("\0something") as listener:
            with self.connection.Client(listener.address) as client:
                with listener.accept() as d:
                    client.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            self.assertRaises(OSError, listener.accept)


class _TestListenerClient(BaseTestCase):

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()

    def test_issue14725(self):
        l = self.connection.Listener()
        p = self.Process(target=self._test, args=(l.address,))
        p.daemon = True
        p.start()
        time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue 14725.
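        # accept() must nevertheless succeed and return the data the client
        # buffered before it disconnected.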
conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @hashlib_helper.requires_hashdigest('sha256') class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=support.LONG_TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((socket_helper.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = 
lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). 
# Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") @hashlib_helper.requires_hashdigest('sha256') class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def _new_shm_name(self, prefix): # Add a PID to the name of a POSIX shared memory object to allow # running multiprocessing tests (test_multiprocessing_fork, # test_multiprocessing_spawn, etc) in parallel. return prefix + str(os.getpid()) def test_shared_memory_basics(self): name_tsmb = self._new_shm_name('test01_tsmb') sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, name_tsmb) self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Verify __repr__ self.assertIn(sms.name, str(sms)) self.assertIn(str(sms.size), str(sms)) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory(name_tsmb) self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. 
same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. same_sms.close() # Creating Shared Memory Segment with -ve size with self.assertRaises(ValueError): shared_memory.SharedMemory(create=True, size=-2) # Attaching Shared Memory Segment without a name with self.assertRaises(ValueError): shared_memory.SharedMemory(create=False) # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. name_dblunlink = self._new_shm_name('test01_dblunlink') sms_uno = shared_memory.SharedMemory( name_dblunlink, create=True, size=5000 ) with self.assertRaises(FileNotFoundError): try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory(name_dblunlink) sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( name_tsmb, create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. 
sms.close() @unittest.skipIf(True, "fails with dill >= 0.3.5") def test_shared_memory_recreate(self): # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) def test_invalid_shared_memory_cration(self): # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_pickle_unpickle(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) sms.buf[0:6] = b'pickle' # Test pickling pickled_sms = pickle.dumps(sms, protocol=proto) # Test unpickling sms2 = pickle.loads(pickled_sms) self.assertIsInstance(sms2, shared_memory.SharedMemory) self.assertEqual(sms.name, sms2.name) self.assertEqual(bytes(sms.buf[0:6]), b'pickle') self.assertEqual(bytes(sms2.buf[0:6]), b'pickle') # Test that unpickled version is still the same SharedMemory sms.buf[0:6] = b'newval' self.assertEqual(bytes(sms.buf[0:6]), b'newval') self.assertEqual(bytes(sms2.buf[0:6]), b'newval') sms2.buf[0:6] = b'oldval' self.assertEqual(bytes(sms.buf[0:6]), b'oldval') self.assertEqual(bytes(sms2.buf[0:6]), b'oldval') def test_shared_memory_pickle_unpickle_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) sms.buf[0:6] = b'pickle' pickled_sms = pickle.dumps(sms, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sms.close() sms.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(pickled_sms) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocessing tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. 
p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' #XXX: ensure correct resource_tracker rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. 
absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify __repr__ self.assertIn(sl.shm.name, str(sl)) self.assertIn(str(list(sl)), str(sl)) # Index Out of Range (get) with self.assertRaises(IndexError): sl[7] # Index Out of Range (set) with self.assertRaises(IndexError): sl[7] = 2 # Assign value without format change (str -> str) current_format = sl._get_packing_format(0) sl[0] = 'howdy' self.assertEqual(current_format, sl._get_packing_format(0)) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. name_duplicate = self._new_shm_name('test03_duplicate') sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual(name_duplicate, sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. 
empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl, protocol=proto) deserialized_sl = pickle.loads(serialized_sl) self.assertIsInstance( deserialized_sl, shared_memory.ShareableList) self.assertEqual(deserialized_sl[-1], 9) self.assertIsNot(sl, deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") sl[3] = "newvalue" self.assertEqual(deserialized_sl[3], "newvalue") larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto) self.assertEqual(len(serialized_sl), len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_ShareableList_pickling_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) serialized_sl = pickle.dumps(sl, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sl.shm.close() sl.shm.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(serialized_sl) def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. p.terminate() p.wait() err_msg = ("A SharedMemory segment was leaked after " "a process was abruptly terminated") for _ in support.sleeping_retry(support.LONG_TIMEOUT, err_msg): try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break if os.name == 'posix': # Without this line it was raising warnings like: # UserWarning: resource_tracker: # There appear to be 1 leaked shared_memory # objects to clean up at shutdown # See: https://bugs.python.org/issue45209 resource_tracker.unregister(f"/{name}", "shared_memory") # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # Test to verify that `Finalize` works. # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): gc.collect() # For PyPy or other GCs. 
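        # Each test must leave the finalizer registry empty before the copy
        # saved in setUp() is restored below.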
self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a gc.collect() # For PyPy or other GCs. b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called gc.collect() # For PyPy or other GCs. c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) @support.requires_resource('cpu') def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with threading_helper.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' 
+ m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): pass finally: # Hack private attribute _handle to avoid printing an error # in conn.__del__ conn._handle = None 
self.assertRaises((ValueError, OSError), multiprocessing.connection.Connection, -1) @hashlib_helper.requires_hashdigest('sha256') class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection._CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') @hashlib_helper.requires_hashdigest('md5') @hashlib_helper.requires_hashdigest('sha256') class ChallengeResponseTest(unittest.TestCase): authkey = b'supadupasecretkey' def create_response(self, message): return multiprocessing.connection._create_response( self.authkey, message ) def verify_challenge(self, message, response): return multiprocessing.connection._verify_challenge( self.authkey, message, response ) def test_challengeresponse(self): for algo in [None, "md5", "sha256"]: with self.subTest(f"{algo=}"): msg = b'is-twenty-bytes-long' # The length of a legacy message. if algo: prefix = b'{%s}' % algo.encode("ascii") else: prefix = b'' msg = prefix + msg response = self.create_response(msg) if not response.startswith(prefix): self.fail(response) self.verify_challenge(msg, response) # TODO(gpshead): We need integration tests for handshakes between modern # deliver_challenge() and verify_response() code and connections running a # test-local copy of the legacy Python <=3.11 implementations. # TODO(gpshead): properly annotate tests for requires_hashdigest rather than # only running these on a platform supporting everything. otherwise logic # issues preventing it from working on FIPS mode setups will be hidden. # # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 @hashlib_helper.requires_hashdigest('sha256') class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() self.mgr.join() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() m.join() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocessing.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random() * 0.100) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random() * 0.100) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.create_server((socket_helper.HOST, 0)) addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) @support.requires_resource('walltime') def test_wait_timeout(self): from multiprocess.connection import wait timeout = 5.0 # seconds a, b = multiprocessing.Pipe() start = time.monotonic() res = wait([a, b], timeout) delta = time.monotonic() - start self.assertEqual(res, []) self.assertGreater(delta, timeout - CLOCK_RES) b.send(None) res = wait([a, b], 20) self.assertEqual(res, [a]) @classmethod def signal_and_sleep(cls, sem, period): 
sem.release() time.sleep(period) @support.requires_resource('walltime') def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocessing.Semaphore(0) a, b = multiprocessing.Pipe() p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) self.assertTrue(sem.acquire(timeout=20)) start = time.monotonic() res = wait([a, p.sentinel, b], expected + 20) delta = time.monotonic() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocessing.Pipe() t = time.monotonic() res = wait([a], timeout=-1) t = time.monotonic() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls, start_method): import json mp = multiprocessing.get_context(start_method) r, w = mp.Pipe(duplex=False) p = mp.Process(target=cls.run_in_grandchild, args=(w,)) with warnings.catch_warnings(category=DeprecationWarning): p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json # start child process using unusual flags prog = ( 'from multiprocess.tests import TestFlags; ' f'TestFlags.run_in_child({multiprocessing.get_start_method()!r})' ) data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == 
'__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. @classmethod def child(cls, n, conn): if n > 1: p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() join_process(p) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocessing.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocessing.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() join_process(p) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocessing.Pipe() fd = self.get_high_socket_fd() try: p = multiprocessing.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() join_process(p) finally: self.close(fd) writer.close() reader.close() if multiprocessing.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) 
conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, ctx.get_start_method()) def test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocessing.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def test_context_check_module_types(self): try: ctx = multiprocessing.get_context('forkserver') except ValueError: raise unittest.SkipTest('forkserver should be available') with self.assertRaisesRegex(TypeError, 'module_names must be a list of strings'): ctx.set_forkserver_preload([1, 2, 3]) def test_set_get(self): multiprocessing.set_forkserver_preload(PRELOAD) count = 0 old_method = multiprocessing.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocessing.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocessing.get_start_method(), method) ctx = multiprocessing.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocessing) count += 1 finally: multiprocessing.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocessing.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['spawn', 'fork'] or methods == ['fork', 'spawn', 'forkserver'] or methods == ['spawn', 'fork', 'forkserver']) def test_preload_resources(self): if multiprocessing.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "Only Spawn on windows so no risk of mixing") @only_run_in_spawn_testsuite("avoids redundant testing.") def 
test_mixed_startmethod(self): # Fork-based locks cannot be used with spawned process for process_method in ["spawn", "forkserver"]: queue = multiprocessing.get_context("fork").Queue() process_ctx = multiprocessing.get_context(process_method) p = process_ctx.Process(target=close_queue, args=(queue,)) err_msg = "A SemLock created in a fork" with self.assertRaisesRegex(RuntimeError, err_msg): p.start() # non-fork-based locks can be used with all other start methods for queue_method in ["spawn", "forkserver"]: for process_method in multiprocessing.get_all_start_methods(): queue = multiprocessing.get_context(queue_method).Queue() process_ctx = multiprocessing.get_context(process_method) p = process_ctx.Process(target=close_queue, args=(queue,)) p.start() p.join() @classmethod def _put_one_in_queue(cls, queue): queue.put(1) @classmethod def _put_two_and_nest_once(cls, queue): queue.put(2) process = multiprocessing.Process(target=cls._put_one_in_queue, args=(queue,)) process.start() process.join() def test_nested_startmethod(self): # gh-108520: Regression test to ensure that child process can send its # arguments to another process queue = multiprocessing.Queue() process = multiprocessing.Process(target=self._put_two_and_nest_once, args=(queue,)) process.start() process.join() results = [] while not queue.empty(): results.append(queue.get()) # gh-109706: queue.put(1) can write into the queue before queue.put(2), # there is no synchronization in the test. self.assertSetEqual(set(results), set([2, 1])) @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestResourceTracker(unittest.TestCase): def _test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os import multiprocess as mp from multiprocess import resource_tracker from multiprocess.shared_memory import SharedMemory mp.set_start_method("spawn") def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() err_msg = (f"A {rtype} resource was leaked after a process was " f"abruptly terminated") for _ in support.sleeping_retry(support.SHORT_TIMEOUT, err_msg): try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, but OSX seems to give # EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) def check_resource_tracker_death(self, 
signum, should_die): # bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) support.wait_process(pid, exitcode=-signal.SIGKILL) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) def test_too_long_name_resource(self): # gh-96819: Resource names that will make the length of a write to a pipe # greater than PIPE_BUF are not allowed rtype = "shared_memory" too_long_name_resource = "a" * (512 - len(rtype)) with self.assertRaises(ValueError): resource_tracker.register(too_long_name_resource, rtype) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() def test_close(self): queue = multiprocessing.SimpleQueue() queue.close() # closing a queue twice should not fail queue.close() # Test specific to 
CPython since it tests private attributes @test.support.cpython_only def test_closed(self): queue = multiprocessing.SimpleQueue() queue.close() self.assertTrue(queue._reader.closed) self.assertTrue(queue._writer.closed) class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: During pool creation, if we can't create a process, # don't leak already created ones. will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) @hashlib_helper.requires_hashdigest('sha256') class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert o[1] == 6 """ manager_class = multiprocessing.managers.SyncManager def setUp(self): self.manager = self.manager_class() self.manager.start() self.proc = None def tearDown(self): if self.proc is not None and self.proc.is_alive(): self.proc.terminate() self.proc.join() self.manager.shutdown() self.manager = None self.proc = None @classmethod def setUpClass(cls): support.reap_children() tearDownClass = setUpClass def wait_proc_exit(self): # Only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395). 
join_process(self.proc) timeout = WAIT_ACTIVE_CHILDREN_TIMEOUT start_time = time.monotonic() for _ in support.sleeping_retry(timeout, error=False): if len(multiprocessing.active_children()) <= 1: break else: dt = time.monotonic() - start_time support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt:.1f} seconds") def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) self.run_worker(self._test_pool, o) @classmethod def _test_queue(cls, obj): assert obj.qsize() == 2 assert obj.full() assert not obj.empty() assert obj.get() == 5 assert not obj.empty() assert obj.get() == 6 assert obj.empty() def test_queue(self, qname="Queue"): o = getattr(self.manager, qname)(2) o.put(5) o.put(6) self.run_worker(self._test_queue, o) assert o.empty() assert not o.full() def test_joinable_queue(self): self.test_queue("JoinableQueue") @classmethod def _test_list(cls, obj): case = unittest.TestCase() case.assertEqual(obj[0], 5) case.assertEqual(obj.count(5), 1) case.assertEqual(obj.index(5), 0) obj.sort() obj.reverse() for x in obj: pass case.assertEqual(len(obj), 1) case.assertEqual(obj.pop(0), 5) def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) self.assertIsNotNone(o) self.assertEqual(len(o), 0) @classmethod def _test_dict(cls, obj): case = unittest.TestCase() case.assertEqual(len(obj), 1) case.assertEqual(obj['foo'], 5) case.assertEqual(obj.get('foo'), 5) case.assertListEqual(list(obj.items()), [('foo', 5)]) case.assertListEqual(list(obj.keys()), ['foo']) case.assertListEqual(list(obj.values()), [5]) case.assertDictEqual(obj.copy(), {'foo': 5}) case.assertTupleEqual(obj.popitem(), ('foo', 5)) def test_dict(self): o = self.manager.dict() o['foo'] = 5 self.run_worker(self._test_dict, o) self.assertIsNotNone(o) self.assertEqual(len(o), 0) @classmethod def _test_value(cls, obj): case = unittest.TestCase() case.assertEqual(obj.value, 1) 
case.assertEqual(obj.get(), 1) obj.set(2) def test_value(self): o = self.manager.Value('i', 1) self.run_worker(self._test_value, o) self.assertEqual(o.value, 2) self.assertEqual(o.get(), 2) @classmethod def _test_array(cls, obj): case = unittest.TestCase() case.assertEqual(obj[0], 0) case.assertEqual(obj[1], 1) case.assertEqual(len(obj), 2) case.assertListEqual(list(obj), [0, 1]) def test_array(self): o = self.manager.Array('i', [0, 1]) self.run_worker(self._test_array, o) @classmethod def _test_namespace(cls, obj): case = unittest.TestCase() case.assertEqual(obj.x, 0) case.assertEqual(obj.y, 1) def test_namespace(self): o = self.manager.Namespace() o.x = 0 o.y = 1 self.run_worker(self._test_namespace, o) class TestNamedResource(unittest.TestCase): @unittest.skipIf(True, "ModuleNotFoundError") #XXX: since only_run_in_spawn @only_run_in_spawn_testsuite("spawn specific test.") def test_global_named_resource_spawn(self): # # gh-90549: Check that global named resources in main module # will not leak by a subprocess, in spawn context. # testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) with open(testfn, 'w', encoding='utf-8') as f: f.write(textwrap.dedent('''\ import multiprocess as mp ctx = mp.get_context('spawn') global_resource = ctx.Semaphore() def submain(): pass if __name__ == '__main__': p = ctx.Process(target=submain) p.start() p.join() ''')) rc, out, err = script_helper.assert_python_ok(testfn) # on error, err = 'UserWarning: resource_tracker: There appear to # be 1 leaked semaphore objects to clean up at shutdown' self.assertFalse(err, msg=err.decode('utf-8')) class MiscTestCase(unittest.TestCase): def test__all__(self): # Just make sure names in not_exported are excluded support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, not_exported=['SUBDEBUG', 'SUBWARNING', 'license', 'citation']) @unittest.skipIf(True, "ModuleNotFoundError") #XXX: since only_run_in_spawn @only_run_in_spawn_testsuite("avoids redundant testing.") def test_spawn_sys_executable_none_allows_import(self): # Regression test for a bug introduced in # https://github.com/python/cpython/issues/90876 that caused an # ImportError in multiprocessing when sys.executable was None. # This can be true in embedded environments. rc, out, err = script_helper.assert_python_ok( "-c", """if 1: import sys sys.executable = None assert "multiprocess" not in sys.modules, "already imported!" import multiprocess as multiprocessing import multiprocess.spawn # This should not fail\n""", ) self.assertEqual(rc, 0) self.assertFalse(err, msg=err.decode('utf-8')) # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocessing.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. 
test.support.gc_collect() processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) if processes: test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocessing.Process connection = multiprocessing.connection current_process = staticmethod(multiprocessing.current_process) parent_process = staticmethod(multiprocessing.parent_process) active_children = staticmethod(multiprocessing.active_children) set_executable = staticmethod(multiprocessing.set_executable) Pool = staticmethod(multiprocessing.Pool) Pipe = staticmethod(multiprocessing.Pipe) Queue = staticmethod(multiprocessing.Queue) JoinableQueue = staticmethod(multiprocessing.JoinableQueue) Lock = staticmethod(multiprocessing.Lock) RLock = staticmethod(multiprocessing.RLock) Semaphore = staticmethod(multiprocessing.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) Condition = staticmethod(multiprocessing.Condition) Event = staticmethod(multiprocessing.Event) Barrier = staticmethod(multiprocessing.Barrier) Value = staticmethod(multiprocessing.Value) Array = staticmethod(multiprocessing.Array) RawValue = staticmethod(multiprocessing.RawValue) RawArray = staticmethod(multiprocessing.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocessing.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocessing.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) timeout = WAIT_ACTIVE_CHILDREN_TIMEOUT start_time = time.monotonic() for _ in support.sleeping_retry(timeout, error=False): if len(multiprocessing.active_children()) <= 1: break else: dt = time.monotonic() - start_time support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt:.1f} seconds") gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. 
test.support.environment_altered = True support.print_warning('Shared objects which still exist ' 'at manager shutdown:') support.print_warning(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocessing.dummy.Process connection = multiprocessing.dummy.connection current_process = staticmethod(multiprocessing.dummy.current_process) active_children = staticmethod(multiprocessing.dummy.active_children) Pool = staticmethod(multiprocessing.dummy.Pool) Pipe = staticmethod(multiprocessing.dummy.Pipe) Queue = staticmethod(multiprocessing.dummy.Queue) JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) Lock = staticmethod(multiprocessing.dummy.Lock) RLock = staticmethod(multiprocessing.dummy.RLock) Semaphore = staticmethod(multiprocessing.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) Condition = staticmethod(multiprocessing.dummy.Condition) Event = staticmethod(multiprocessing.dummy.Event) Barrier = staticmethod(multiprocessing.dummy.Barrier) Value = staticmethod(multiprocessing.dummy.Value) Array = staticmethod(multiprocessing.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method, only_type=None, exclude_types=False): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: if only_type and type_ != only_type: continue if exclude_types: continue newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass if type_ == 'manager': Temp = hashlib_helper.requires_hashdigest('sha256')(Temp) Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): if only_type: continue class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = __module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocessing.set_forkserver_preload(PRELOAD) multiprocessing.process._cleanup() dangling[0] = multiprocessing.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " "see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates temp directory multiprocessing.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = False # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. 
test.support.gc_collect() multiprocessing.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocessing.process._dangling) - set(dangling[0]) if processes: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocessing.util._cleanup_tests() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule @unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available') @unittest.skipIf(sys.platform != "linux", "Linux only") class SemLockTests(unittest.TestCase): def test_semlock_subclass(self): class SemLock(_multiprocessing.SemLock): pass name = f'test_semlock_subclass-{os.getpid()}' s = SemLock(1, 0, 10, name, False) _multiprocessing.sem_unlink(name) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/__main__.py000066400000000000000000000015701455552142400262440ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + '__init__.py') + \ glob.glob(suite + os.path.sep + '*' + os.path.sep + '__init__.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/mp_fork_bomb.py000066400000000000000000000007001455552142400271520ustar00rootroot00000000000000import multiprocessing, sys def foo(): print("123") # Because "if __name__ == '__main__'" is missing this will not work # correctly on Windows. However, we should get a RuntimeError rather # than the Windows equivalent of a fork bomb. 
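# For reference only (not part of this fixture): a guarded variant that is
# safe under the 'spawn' and 'forkserver' start methods would wrap the
# process creation like this:
#
#     if __name__ == '__main__':
#         multiprocessing.set_start_method('spawn')
#         p = multiprocessing.Process(target=foo)
#         p.start()
#         p.join()
#         sys.exit(p.exitcode)
#
# The unguarded code below is deliberate: TestNoForkBomb relies on it to
# assert that non-fork start methods raise RuntimeError instead of forking
# endlessly.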
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) else: multiprocessing.set_start_method('spawn') p = multiprocessing.Process(target=foo) p.start() p.join() sys.exit(p.exitcode) uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/mp_preload.py000066400000000000000000000005551455552142400266500ustar00rootroot00000000000000import multiprocessing multiprocessing.Lock() def f(): print("ok") if __name__ == "__main__": ctx = multiprocessing.get_context("forkserver") modname = "multiprocess.tests.mp_preload" # Make sure it's importable __import__(modname) ctx.set_forkserver_preload([modname]) proc = ctx.Process(target=f) proc.start() proc.join() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_fork/000077500000000000000000000000001455552142400314565ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_fork/__init__.py000066400000000000000000000014751455552142400335760ustar00rootroot00000000000000import os.path import sys import unittest from test import support import glob import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("fork is not available on Windows") if sys.platform == 'darwin': raise unittest.SkipTest("test may crash on macOS (bpo-33725)") suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) test_manager.py000066400000000000000000000003021455552142400344150ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_forkimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', only_type="manager") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_fork/test_misc.py000066400000000000000000000003011455552142400340140ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', exclude_types=True) if __name__ == '__main__': unittest.main() test_processes.py000066400000000000000000000003041455552142400350130ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_forkimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', only_type="processes") if __name__ == '__main__': unittest.main() test_threads.py000066400000000000000000000003021455552142400344350ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_forkimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', only_type="threads") if __name__ == '__main__': unittest.main() 
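# Note (illustrative sketch, not generated output): these thin wrapper modules
# delegate to install_tests_in_module_dict() in multiprocess/tests/__init__.py,
# which combines each _Test* base class with the mixin matching its allowed
# type and injects the result into the wrapper's globals. Class names below
# are inferred from that naming logic; for a base class such as _TestLock
# whose ALLOWED_TYPES includes 'processes' and 'threads', the generated
# classes would look roughly like:
#
#     class WithProcessesTestLock(_TestLock, ProcessesMixin, unittest.TestCase):
#         pass
#
#     class WithThreadsTestLock(_TestLock, ThreadsMixin, unittest.TestCase):
#         pass
#
# Passing only_type="threads" (as above) keeps just the threads variant for
# this module.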
uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_forkserver/000077500000000000000000000000001455552142400327055ustar00rootroot00000000000000__init__.py000066400000000000000000000013421455552142400347370ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_forkserverimport os.path import sys import unittest from test import support import glob import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("forkserver is not available on Windows") suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) test_manager.py000066400000000000000000000003101455552142400356430ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', only_type="manager") if __name__ == '__main__': unittest.main() test_misc.py000066400000000000000000000003071455552142400351720ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', exclude_types=True) if __name__ == '__main__': unittest.main() test_processes.py000066400000000000000000000003121455552142400362410ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', only_type="processes") if __name__ == '__main__': unittest.main() test_threads.py000066400000000000000000000003101455552142400356630ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', only_type="threads") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_main_handling.py000066400000000000000000000271071455552142400336660ustar00rootroot00000000000000# tests __main__ module handling in multiprocessing from test import support from test.support import import_helper # Skip tests if _multiprocessing wasn't built. import_helper.import_module('_multiprocessing') import importlib import importlib.machinery import unittest import sys import os import os.path import py_compile from test.support import os_helper from test.support.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, assert_python_ok) if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") # Look up which start methods are available to test import multiprocess as multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) # Issue #22332: Skip tests if sem_open implementation is broken. 
import_helper.import_module('multiprocess.synchronize') verbose = support.verbose test_source = """\ # multiprocessing includes all sorts of shenanigans to make __main__ # attributes accessible in the subprocess in a pickle compatible way. # We run the "doesn't work in the interactive interpreter" example from # the docs to make sure it *does* work from an executed __main__, # regardless of the invocation mechanism import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method from test import support # We use this __main__ defined function in the map call below in order to # check that multiprocessing in correctly running the unguarded # code in child processes and then making it available as __main__ def f(x): return x*x # Check explicit relative imports if "check_sibling" in __file__: # We're inside a package and not in a __main__.py file # so make sure explicit relative imports work correctly from . import sibling if __name__ == '__main__': start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(f, [1, 2, 3], callback=results.extend) # up to 1 min to report the results for _ in support.sleeping_retry(support.LONG_TIMEOUT, "Timed out waiting for results"): if results: break results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) test_source_main_skipped_in_children = """\ # __main__.py files have an implied "if __name__ == '__main__'" so # multiprocessing should always skip running them in child processes # This means we can't use __main__ defined functions in child processes, # so we just use "int" as a passthrough operation below if __name__ != "__main__": raise RuntimeError("Should only be called as __main__!") import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method from test import support start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(int, [1, 4, 9], callback=results.extend) # up to 1 min to report the results for _ in support.sleeping_retry(support.LONG_TIMEOUT, "Timed out waiting for results"): if results: break results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) # These helpers were copied from test_cmd_line_script & tweaked a bit... def _make_test_script(script_dir, script_basename, source=test_source, omit_suffix=False): to_return = make_script(script_dir, script_basename, source, omit_suffix) # Hack to check explicit relative imports if script_basename == "check_sibling": make_script(script_dir, "sibling", "") importlib.invalidate_caches() return to_return def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source=test_source, depth=1): to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth) importlib.invalidate_caches() return to_return # There's no easy way to pass the script directory in to get # -m to work (avoiding that is the whole point of making # directories and zipfiles executable!) 
# So we fake it for testing purposes with a custom launch script launch_source = """\ import sys, os.path, runpy sys.path.insert(0, %s) runpy._run_module_as_main(%r) """ def _make_launch_script(script_dir, script_basename, module_name, path=None): if path is None: path = "os.path.dirname(__file__)" else: path = repr(path) source = launch_source % (path, module_name) to_return = make_script(script_dir, script_basename, source) importlib.invalidate_caches() return to_return class MultiProcessingCmdLineMixin(): maxDiff = None # Show full tracebacks on subprocess failure def setUp(self): if self.start_method not in AVAILABLE_START_METHODS: self.skipTest("%r start method not available" % self.start_method) def _check_output(self, script_name, exit_code, out, err): if verbose > 1: print("Output from test script %r:" % script_name) print(repr(out)) self.assertEqual(exit_code, 0) self.assertEqual(err.decode('utf-8'), '') expected_results = "%s -> [1, 4, 9]" % self.start_method self.assertEqual(out.decode('utf-8').strip(), expected_results) def _check_script(self, script_name, *cmd_line_switches): if not __debug__: cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) run_args = cmd_line_switches + (script_name, self.start_method) rc, out, err = assert_python_ok(*run_args, __isolated=False) self._check_output(script_name, rc, out, err) def test_basic_script(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') self._check_script(script_name) def test_basic_script_no_suffix(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script', omit_suffix=True) self._check_script(script_name) def test_ipython_workaround(self): # Some versions of the IPython launch script are missing the # __name__ = "__main__" guard, and multiprocessing has long had # a workaround for that case # See https://github.com/ipython/ipython/issues/4698 source = test_source_main_skipped_in_children with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'ipython', source=source) self._check_script(script_name) script_no_suffix = _make_test_script(script_dir, 'ipython', source=source, omit_suffix=True) self._check_script(script_no_suffix) def test_script_compiled(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(pyc_file) def test_directory(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) self._check_script(script_dir) def test_directory_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(script_dir) def test_zipfile(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) self._check_script(zip_name) def test_zipfile_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) compiled_name = 
py_compile.compile(script_name, doraise=True) zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) self._check_script(zip_name) def test_module_in_package(self): with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, 'check_sibling') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.check_sibling') self._check_script(launch_name) def test_module_in_package_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) self._check_script(launch_name) def test_module_in_subpackage_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) self._check_script(launch_name) def test_package(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) def test_package_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) compiled_name = py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) # Test all supported start methods (setupClass skips as appropriate) class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'spawn' main_in_children_source = test_source_main_skipped_in_children class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'fork' main_in_children_source = test_source class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'forkserver' main_in_children_source = test_source_main_skipped_in_children def tearDownModule(): support.reap_children() if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_spawn/000077500000000000000000000000001455552142400316455ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_spawn/__init__.py000066400000000000000000000011771455552142400337640ustar00rootroot00000000000000import os.path import sys import unittest from test import support import glob import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) 
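# Usage note (illustrative, not part of the upstream runner): executing this
# file directly runs every sibling test_*.py in its own subprocess and exits
# with a nonzero status if any of them fail, e.g.
#
#   $ python multiprocess/tests/test_multiprocessing_spawn/__init__.py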
test_manager.py000066400000000000000000000003031455552142400346050ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_spawnimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', only_type="manager") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_spawn/test_misc.py000066400000000000000000000003021455552142400342040ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', exclude_types=True) if __name__ == '__main__': unittest.main() test_processes.py000066400000000000000000000003051455552142400352030ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_spawnimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', only_type="processes") if __name__ == '__main__': unittest.main() test_threads.py000066400000000000000000000003031455552142400346250ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/tests/test_multiprocessing_spawnimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', only_type="threads") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.12/multiprocess/util.py000066400000000000000000000333541455552142400243440ustar00rootroot00000000000000# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import itertools import sys import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocess' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocess ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # Abstract socket support def _platform_supports_abstract_sockets(): if sys.platform == "linux": return True if hasattr(sys, 'getandroidapilevel'): return True return False def is_abstract_socket_namespace(address): if not address: return False if isinstance(address, bytes): return address[0] == 0 elif isinstance(address, str): return address[0] == "\0" raise TypeError(f'address type of {address!r} unrecognized') abstract_sockets_supported = _platform_supports_abstract_sockets() # # Function returning a temp directory which will be removed on exit # def _remove_temp_dir(rmtree, tempdir): rmtree(tempdir) current_process = process.current_process() # current_process() can be None if the finalizer is called # late during Python finalization if current_process is not None: current_process._config['tempdir'] = None def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up tempdir = process.current_process()._config.get('tempdir') if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) # keep a strong reference to shutil.rmtree(), since the finalizer # can be called late during Python shutdown Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), exitpriority=-100) process.current_process()._config['tempdir'] = tempdir return tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} 
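# Illustrative sketch (not part of the module itself): Finalize (defined
# below) registers a cleanup callback that is either tied to an object's
# lifetime via a weakref or, when obj is None, keyed purely on an exit
# priority so that _run_finalizers() invokes it at shutdown.  get_temp_dir()
# above registers its temp-directory removal in roughly this way:
#
#   import shutil
#   # 'some_dir' is a hypothetical path used only for illustration
#   Finalize(None, shutil.rmtree, args=(some_dir,), exitpriority=-100)
#
# register_after_fork(obj, func) analogously re-runs func(obj) in each forked
# child, as ForkAwareLocal below does to drop state inherited from the parent.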
_finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): if (exitpriority is not None) and not isinstance(exitpriority,int): raise TypeError( "Exitpriority ({0!r}) must be None or int, not {1!s}".format( exitpriority, type(exitpriority))) if obj is not None: self._weakref = weakref.ref(obj, self) elif exitpriority is None: raise ValueError("Without object, exitpriority cannot be None") self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<%s object, dead>' % self.__class__.__name__ x = '<%s object, callback=%s' % ( self.__class__.__name__, getattr(self._callback, '__name__', self._callback)) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitpriority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0] is not None else: f = lambda p : p[0] is not None and p[0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). # list(_finalizer_registry) should be atomic, while # list(_finalizer_registry.items()) is not. 
keys = [key for key in list(_finalizer_registry) if f(key)] keys.sort(reverse=True) for key in keys: finalizer = _finalizer_registry.get(key) # key may have been removed from the registry if finalizer is not None: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=process.active_children, current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) def _at_fork_reinit(self): self._lock._at_fork_reinit() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () # # Close fds except those specified # try: MAXFD = os.sysconf("SC_OPEN_MAX") except Exception: MAXFD = 256 def close_all_fds_except(fds): fds = list(fds) + [-1, MAXFD] fds.sort() assert fds[-1] == MAXFD, 'fd too large' for i in range(len(fds) - 1): os.closerange(fds[i]+1, fds[i+1]) # # Close sys.stdin and replace stdin with os.devnull # def _close_stdin(): if sys.stdin is None: return try: sys.stdin.close() except (OSError, ValueError): pass try: fd = os.open(os.devnull, os.O_RDONLY) try: sys.stdin = open(fd, encoding="utf-8", closefd=False) except: os.close(fd) raise except (OSError, ValueError): pass # # Flush standard streams, if any # def _flush_std_streams(): try: sys.stdout.flush() except (AttributeError, ValueError): pass try: sys.stderr.flush() except (AttributeError, ValueError): pass # # Start a program with only specified fds kept open # def spawnv_passfds(path, args, passfds): import _posixsubprocess import subprocess passfds = tuple(sorted(map(int, passfds))) errpipe_read, errpipe_write = os.pipe() 
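    # Note (added for clarity): the pipe created above is handed to
    # _posixsubprocess.fork_exec() so the child has a descriptor on which to
    # report an exec failure; both ends are closed in the finally block once
    # the child has been launched.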
try: return _posixsubprocess.fork_exec( args, [path], True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, -1, None, None, None, -1, None, subprocess._USE_VFORK) finally: os.close(errpipe_read) os.close(errpipe_write) def close_fds(*fds): """Close each file descriptor given as an argument""" for fd in fds: os.close(fd) def _cleanup_tests(): """Cleanup multiprocessing resources when multiprocessing tests completed.""" from test import support # cleanup multiprocessing process._cleanup() # Stop the ForkServer process if it's running from multiprocess import forkserver forkserver._forkserver._stop() # Stop the ResourceTracker process if it's running from multiprocess import resource_tracker resource_tracker._resource_tracker._stop() # bpo-37421: Explicitly call _run_finalizers() to remove immediately # temporary directories created by multiprocessing.util.get_temp_dir(). _run_finalizers() support.gc_collect() support.reap_children() uqfoundation-multiprocess-b3457a5/py3.13/000077500000000000000000000000001455552142400202555ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/Modules/000077500000000000000000000000001455552142400216655ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/Modules/_multiprocess/000077500000000000000000000000001455552142400245555ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/Modules/_multiprocess/clinic/000077500000000000000000000000001455552142400260165ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/Modules/_multiprocess/clinic/multiprocessing.c.h000066400000000000000000000104221455552142400316360ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #include "pycore_modsupport.h" // _PyArg_CheckPositional() #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_closesocket__doc__, "closesocket($module, handle, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_CLOSESOCKET_METHODDEF \ {"closesocket", (PyCFunction)_multiprocessing_closesocket, METH_O, _multiprocessing_closesocket__doc__}, static PyObject * _multiprocessing_closesocket_impl(PyObject *module, HANDLE handle); static PyObject * _multiprocessing_closesocket(PyObject *module, PyObject *arg) { PyObject *return_value = NULL; HANDLE handle; handle = PyLong_AsVoidPtr(arg); if (!handle && PyErr_Occurred()) { goto exit; } return_value = _multiprocessing_closesocket_impl(module, handle); exit: return return_value; } #endif /* defined(MS_WINDOWS) */ #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_recv__doc__, "recv($module, handle, size, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_RECV_METHODDEF \ {"recv", _PyCFunction_CAST(_multiprocessing_recv), METH_FASTCALL, _multiprocessing_recv__doc__}, static PyObject * _multiprocessing_recv_impl(PyObject *module, HANDLE handle, int size); static PyObject * _multiprocessing_recv(PyObject *module, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; HANDLE handle; int size; if (!_PyArg_CheckPositional("recv", nargs, 2, 2)) { goto exit; } handle = PyLong_AsVoidPtr(args[0]); if (!handle && PyErr_Occurred()) { goto exit; } size = PyLong_AsInt(args[1]); if (size == -1 && PyErr_Occurred()) { goto exit; } return_value = _multiprocessing_recv_impl(module, handle, size); exit: return return_value; } #endif /* defined(MS_WINDOWS) */ #if defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_send__doc__, "send($module, handle, buf, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_SEND_METHODDEF \ {"send", 
_PyCFunction_CAST(_multiprocessing_send), METH_FASTCALL, _multiprocessing_send__doc__}, static PyObject * _multiprocessing_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf); static PyObject * _multiprocessing_send(PyObject *module, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; HANDLE handle; Py_buffer buf = {NULL, NULL}; if (!_PyArg_CheckPositional("send", nargs, 2, 2)) { goto exit; } handle = PyLong_AsVoidPtr(args[0]); if (!handle && PyErr_Occurred()) { goto exit; } if (PyObject_GetBuffer(args[1], &buf, PyBUF_SIMPLE) != 0) { goto exit; } return_value = _multiprocessing_send_impl(module, handle, &buf); exit: /* Cleanup for buf */ if (buf.obj) { PyBuffer_Release(&buf); } return return_value; } #endif /* defined(MS_WINDOWS) */ PyDoc_STRVAR(_multiprocessing_sem_unlink__doc__, "sem_unlink($module, name, /)\n" "--\n" "\n"); #define _MULTIPROCESSING_SEM_UNLINK_METHODDEF \ {"sem_unlink", (PyCFunction)_multiprocessing_sem_unlink, METH_O, _multiprocessing_sem_unlink__doc__}, static PyObject * _multiprocessing_sem_unlink_impl(PyObject *module, const char *name); static PyObject * _multiprocessing_sem_unlink(PyObject *module, PyObject *arg) { PyObject *return_value = NULL; const char *name; if (!PyUnicode_Check(arg)) { _PyArg_BadArgument("sem_unlink", "argument", "str", arg); goto exit; } Py_ssize_t name_length; name = PyUnicode_AsUTF8AndSize(arg, &name_length); if (name == NULL) { goto exit; } if (strlen(name) != (size_t)name_length) { PyErr_SetString(PyExc_ValueError, "embedded null character"); goto exit; } return_value = _multiprocessing_sem_unlink_impl(module, name); exit: return return_value; } #ifndef _MULTIPROCESSING_CLOSESOCKET_METHODDEF #define _MULTIPROCESSING_CLOSESOCKET_METHODDEF #endif /* !defined(_MULTIPROCESSING_CLOSESOCKET_METHODDEF) */ #ifndef _MULTIPROCESSING_RECV_METHODDEF #define _MULTIPROCESSING_RECV_METHODDEF #endif /* !defined(_MULTIPROCESSING_RECV_METHODDEF) */ #ifndef _MULTIPROCESSING_SEND_METHODDEF #define _MULTIPROCESSING_SEND_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEND_METHODDEF) */ /*[clinic end generated code: output=73b4cb8428d816da input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.13/Modules/_multiprocess/clinic/posixshmem.c.h000066400000000000000000000051341455552142400306070ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(HAVE_SHM_OPEN) PyDoc_STRVAR(_posixshmem_shm_open__doc__, "shm_open($module, /, path, flags, mode=511)\n" "--\n" "\n" "Open a shared memory object. 
Returns a file descriptor (integer)."); #define _POSIXSHMEM_SHM_OPEN_METHODDEF \ {"shm_open", (PyCFunction)(void(*)(void))_posixshmem_shm_open, METH_VARARGS|METH_KEYWORDS, _posixshmem_shm_open__doc__}, static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode); static PyObject * _posixshmem_shm_open(PyObject *module, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; static char *_keywords[] = {"path", "flags", "mode", NULL}; PyObject *path; int flags; int mode = 511; int _return_value; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "Ui|i:shm_open", _keywords, &path, &flags, &mode)) goto exit; _return_value = _posixshmem_shm_open_impl(module, path, flags, mode); if ((_return_value == -1) && PyErr_Occurred()) { goto exit; } return_value = PyLong_FromLong((long)_return_value); exit: return return_value; } #endif /* defined(HAVE_SHM_OPEN) */ #if defined(HAVE_SHM_UNLINK) PyDoc_STRVAR(_posixshmem_shm_unlink__doc__, "shm_unlink($module, /, path)\n" "--\n" "\n" "Remove a shared memory object (similar to unlink()).\n" "\n" "Remove a shared memory object name, and, once all processes have unmapped\n" "the object, de-allocates and destroys the contents of the associated memory\n" "region."); #define _POSIXSHMEM_SHM_UNLINK_METHODDEF \ {"shm_unlink", (PyCFunction)(void(*)(void))_posixshmem_shm_unlink, METH_VARARGS|METH_KEYWORDS, _posixshmem_shm_unlink__doc__}, static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path); static PyObject * _posixshmem_shm_unlink(PyObject *module, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; static char *_keywords[] = {"path", NULL}; PyObject *path; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "U:shm_unlink", _keywords, &path)) goto exit; return_value = _posixshmem_shm_unlink_impl(module, path); exit: return return_value; } #endif /* defined(HAVE_SHM_UNLINK) */ #ifndef _POSIXSHMEM_SHM_OPEN_METHODDEF #define _POSIXSHMEM_SHM_OPEN_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_OPEN_METHODDEF) */ #ifndef _POSIXSHMEM_SHM_UNLINK_METHODDEF #define _POSIXSHMEM_SHM_UNLINK_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_UNLINK_METHODDEF) */ /*[clinic end generated code: output=be0661dbed83ea23 input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.13/Modules/_multiprocess/clinic/semaphore.c.h000066400000000000000000000406571455552142400304070ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) # include "pycore_gc.h" // PyGC_Head # include "pycore_runtime.h" // _Py_ID() #endif #include "pycore_modsupport.h" // _PyArg_UnpackKeywords() #if defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_acquire__doc__, "acquire($self, /, block=True, timeout=None)\n" "--\n" "\n" "Acquire the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF \ {"acquire", _PyCFunction_CAST(_multiprocessing_SemLock_acquire), METH_FASTCALL|METH_KEYWORDS, _multiprocessing_SemLock_acquire__doc__}, static PyObject * _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj); static PyObject * _multiprocessing_SemLock_acquire(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) #define NUM_KEYWORDS 2 static struct { PyGC_Head _this_is_not_used; PyObject_VAR_HEAD PyObject *ob_item[NUM_KEYWORDS]; } _kwtuple = { .ob_base = 
PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) .ob_item = { &_Py_ID(block), &_Py_ID(timeout), }, }; #undef NUM_KEYWORDS #define KWTUPLE (&_kwtuple.ob_base.ob_base) #else // !Py_BUILD_CORE # define KWTUPLE NULL #endif // !Py_BUILD_CORE static const char * const _keywords[] = {"block", "timeout", NULL}; static _PyArg_Parser _parser = { .keywords = _keywords, .fname = "acquire", .kwtuple = KWTUPLE, }; #undef KWTUPLE PyObject *argsbuf[2]; Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 0; int blocking = 1; PyObject *timeout_obj = Py_None; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 2, 0, argsbuf); if (!args) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (args[0]) { blocking = PyObject_IsTrue(args[0]); if (blocking < 0) { goto exit; } if (!--noptargs) { goto skip_optional_pos; } } timeout_obj = args[1]; skip_optional_pos: return_value = _multiprocessing_SemLock_acquire_impl(self, blocking, timeout_obj); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_release__doc__, "release($self, /)\n" "--\n" "\n" "Release the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF \ {"release", (PyCFunction)_multiprocessing_SemLock_release, METH_NOARGS, _multiprocessing_SemLock_release__doc__}, static PyObject * _multiprocessing_SemLock_release_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock_release(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock_release_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) && defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_acquire__doc__, "acquire($self, /, block=True, timeout=None)\n" "--\n" "\n" "Acquire the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF \ {"acquire", _PyCFunction_CAST(_multiprocessing_SemLock_acquire), METH_FASTCALL|METH_KEYWORDS, _multiprocessing_SemLock_acquire__doc__}, static PyObject * _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj); static PyObject * _multiprocessing_SemLock_acquire(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) #define NUM_KEYWORDS 2 static struct { PyGC_Head _this_is_not_used; PyObject_VAR_HEAD PyObject *ob_item[NUM_KEYWORDS]; } _kwtuple = { .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) .ob_item = { &_Py_ID(block), &_Py_ID(timeout), }, }; #undef NUM_KEYWORDS #define KWTUPLE (&_kwtuple.ob_base.ob_base) #else // !Py_BUILD_CORE # define KWTUPLE NULL #endif // !Py_BUILD_CORE static const char * const _keywords[] = {"block", "timeout", NULL}; static _PyArg_Parser _parser = { .keywords = _keywords, .fname = "acquire", .kwtuple = KWTUPLE, }; #undef KWTUPLE PyObject *argsbuf[2]; Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 0; int blocking = 1; PyObject *timeout_obj = Py_None; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 2, 0, argsbuf); if (!args) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (args[0]) { blocking = PyObject_IsTrue(args[0]); if (blocking < 0) { goto exit; } if (!--noptargs) { goto skip_optional_pos; } } timeout_obj = args[1]; skip_optional_pos: return_value = _multiprocessing_SemLock_acquire_impl(self, blocking, timeout_obj); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) PyDoc_STRVAR(_multiprocessing_SemLock_release__doc__, "release($self, /)\n" "--\n" "\n" "Release the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF \ {"release", (PyCFunction)_multiprocessing_SemLock_release, METH_NOARGS, _multiprocessing_SemLock_release__doc__}, static PyObject * _multiprocessing_SemLock_release_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock_release(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock_release_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) && !defined(MS_WINDOWS) */ #if defined(HAVE_MP_SEMAPHORE) static PyObject * _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, int maxvalue, const char *name, int unlink); static PyObject * _multiprocessing_SemLock(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) #define NUM_KEYWORDS 5 static struct { PyGC_Head _this_is_not_used; PyObject_VAR_HEAD PyObject *ob_item[NUM_KEYWORDS]; } _kwtuple = { .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) .ob_item = { &_Py_ID(kind), &_Py_ID(value), &_Py_ID(maxvalue), &_Py_ID(name), &_Py_ID(unlink), }, }; #undef NUM_KEYWORDS #define KWTUPLE (&_kwtuple.ob_base.ob_base) #else // !Py_BUILD_CORE # define KWTUPLE NULL #endif // !Py_BUILD_CORE static const char * const _keywords[] = {"kind", "value", "maxvalue", "name", "unlink", NULL}; static _PyArg_Parser _parser = { .keywords = _keywords, .fname = "SemLock", .kwtuple = KWTUPLE, }; #undef KWTUPLE PyObject *argsbuf[5]; PyObject * const *fastargs; Py_ssize_t nargs = PyTuple_GET_SIZE(args); int kind; int value; int maxvalue; const char *name; int unlink; fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 5, 5, 0, argsbuf); if (!fastargs) { goto exit; } kind = PyLong_AsInt(fastargs[0]); if (kind == -1 && PyErr_Occurred()) { goto exit; } value = PyLong_AsInt(fastargs[1]); if (value == -1 && PyErr_Occurred()) { goto exit; } maxvalue = PyLong_AsInt(fastargs[2]); if (maxvalue == -1 && PyErr_Occurred()) { goto exit; } if (!PyUnicode_Check(fastargs[3])) { _PyArg_BadArgument("SemLock", "argument 'name'", "str", fastargs[3]); goto exit; } Py_ssize_t name_length; name = PyUnicode_AsUTF8AndSize(fastargs[3], &name_length); if (name == NULL) { goto exit; } if (strlen(name) != (size_t)name_length) { PyErr_SetString(PyExc_ValueError, "embedded null character"); goto exit; } unlink = PyObject_IsTrue(fastargs[4]); if (unlink < 0) { goto exit; } return_value = _multiprocessing_SemLock_impl(type, kind, value, maxvalue, name, unlink); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__rebuild__doc__, "_rebuild($type, handle, kind, maxvalue, name, /)\n" "--\n" "\n"); #define 
_MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF \ {"_rebuild", _PyCFunction_CAST(_multiprocessing_SemLock__rebuild), METH_FASTCALL|METH_CLASS, _multiprocessing_SemLock__rebuild__doc__}, static PyObject * _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, const char *name); static PyObject * _multiprocessing_SemLock__rebuild(PyTypeObject *type, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; SEM_HANDLE handle; int kind; int maxvalue; const char *name; if (!_PyArg_ParseStack(args, nargs, ""F_SEM_HANDLE"iiz:_rebuild", &handle, &kind, &maxvalue, &name)) { goto exit; } return_value = _multiprocessing_SemLock__rebuild_impl(type, handle, kind, maxvalue, name); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__count__doc__, "_count($self, /)\n" "--\n" "\n" "Num of `acquire()`s minus num of `release()`s for this process."); #define _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF \ {"_count", (PyCFunction)_multiprocessing_SemLock__count, METH_NOARGS, _multiprocessing_SemLock__count__doc__}, static PyObject * _multiprocessing_SemLock__count_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__count(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__count_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__is_mine__doc__, "_is_mine($self, /)\n" "--\n" "\n" "Whether the lock is owned by this thread."); #define _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF \ {"_is_mine", (PyCFunction)_multiprocessing_SemLock__is_mine, METH_NOARGS, _multiprocessing_SemLock__is_mine__doc__}, static PyObject * _multiprocessing_SemLock__is_mine_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__is_mine(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__is_mine_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__get_value__doc__, "_get_value($self, /)\n" "--\n" "\n" "Get the value of the semaphore."); #define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF \ {"_get_value", (PyCFunction)_multiprocessing_SemLock__get_value, METH_NOARGS, _multiprocessing_SemLock__get_value__doc__}, static PyObject * _multiprocessing_SemLock__get_value_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__get_value(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__get_value_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__is_zero__doc__, "_is_zero($self, /)\n" "--\n" "\n" "Return whether semaphore has value zero."); #define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF \ {"_is_zero", (PyCFunction)_multiprocessing_SemLock__is_zero, METH_NOARGS, _multiprocessing_SemLock__is_zero__doc__}, static PyObject * _multiprocessing_SemLock__is_zero_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__is_zero(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__is_zero_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock__after_fork__doc__, "_after_fork($self, /)\n" "--\n" "\n" "Rezero the net acquisition count after fork()."); #define _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF \ {"_after_fork", 
(PyCFunction)_multiprocessing_SemLock__after_fork, METH_NOARGS, _multiprocessing_SemLock__after_fork__doc__}, static PyObject * _multiprocessing_SemLock__after_fork_impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock__after_fork(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock__after_fork_impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock___enter____doc__, "__enter__($self, /)\n" "--\n" "\n" "Enter the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF \ {"__enter__", (PyCFunction)_multiprocessing_SemLock___enter__, METH_NOARGS, _multiprocessing_SemLock___enter____doc__}, static PyObject * _multiprocessing_SemLock___enter___impl(SemLockObject *self); static PyObject * _multiprocessing_SemLock___enter__(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return _multiprocessing_SemLock___enter___impl(self); } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) PyDoc_STRVAR(_multiprocessing_SemLock___exit____doc__, "__exit__($self, exc_type=None, exc_value=None, exc_tb=None, /)\n" "--\n" "\n" "Exit the semaphore/lock."); #define _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF \ {"__exit__", _PyCFunction_CAST(_multiprocessing_SemLock___exit__), METH_FASTCALL, _multiprocessing_SemLock___exit____doc__}, static PyObject * _multiprocessing_SemLock___exit___impl(SemLockObject *self, PyObject *exc_type, PyObject *exc_value, PyObject *exc_tb); static PyObject * _multiprocessing_SemLock___exit__(SemLockObject *self, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; PyObject *exc_type = Py_None; PyObject *exc_value = Py_None; PyObject *exc_tb = Py_None; if (!_PyArg_CheckPositional("__exit__", nargs, 0, 3)) { goto exit; } if (nargs < 1) { goto skip_optional; } exc_type = args[0]; if (nargs < 2) { goto skip_optional; } exc_value = args[1]; if (nargs < 3) { goto skip_optional; } exc_tb = args[2]; skip_optional: return_value = _multiprocessing_SemLock___exit___impl(self, exc_type, exc_value, exc_tb); exit: return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #ifndef _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF #define _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF #define _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF #define _MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF #define _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF #define _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF #define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF #define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF #define _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF) */ #ifndef 
_MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF #define _MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF) */ #ifndef _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF #define _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF) */ /*[clinic end generated code: output=d57992037e6770b6 input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.13/Modules/_multiprocess/multiprocess.c000066400000000000000000000162301455552142400274540ustar00rootroot00000000000000/* * Extension module used by multiprocess package * * multiprocess.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocess.h" /*[python input] class HANDLE_converter(CConverter): type = "HANDLE" format_unit = '"F_HANDLE"' def parse_arg(self, argname, displayname, *, limited_capi): return self.format_code(""" {paramname} = PyLong_AsVoidPtr({argname}); if (!{paramname} && PyErr_Occurred()) {{{{ goto exit; }}}} """, argname=argname) [python start generated code]*/ /*[python end generated code: output=da39a3ee5e6b4b0d input=3cf0318efc6a8772]*/ /*[clinic input] module _multiprocess [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=01e0745f380ac6e3]*/ #include "clinic/multiprocessing.c.h" /* * Function which raises exceptions based on error codes */ PyObject * _PyMp_SetError(PyObject *Type, int num) { switch (num) { #ifdef MS_WINDOWS case MP_STANDARD_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, 0); break; case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, WSAGetLastError()); break; #else /* !MS_WINDOWS */ case MP_STANDARD_ERROR: case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetFromErrno(Type); break; #endif /* !MS_WINDOWS */ case MP_MEMORY_ERROR: PyErr_NoMemory(); break; case MP_EXCEPTION_HAS_BEEN_SET: break; default: PyErr_Format(PyExc_RuntimeError, "unknown error number %d", num); } return NULL; } #ifdef MS_WINDOWS /*[clinic input] _multiprocess.closesocket handle: HANDLE / [clinic start generated code]*/ static PyObject * _multiprocess_closesocket_impl(PyObject *module, HANDLE handle) /*[clinic end generated code: output=214f359f900966f4 input=8a20706dd386c6cc]*/ { int ret; Py_BEGIN_ALLOW_THREADS ret = closesocket((SOCKET) handle); Py_END_ALLOW_THREADS if (ret) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); Py_RETURN_NONE; } /*[clinic input] _multiprocess.recv handle: HANDLE size: int / [clinic start generated code]*/ static PyObject * _multiprocess_recv_impl(PyObject *module, HANDLE handle, int size) /*[clinic end generated code: output=92322781ba9ff598 input=6a5b0834372cee5b]*/ { int nread; PyObject *buf; buf = PyBytes_FromStringAndSize(NULL, size); if (!buf) return NULL; Py_BEGIN_ALLOW_THREADS nread = recv((SOCKET) handle, PyBytes_AS_STRING(buf), size, 0); Py_END_ALLOW_THREADS if (nread < 0) { Py_DECREF(buf); return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); } _PyBytes_Resize(&buf, nread); return buf; } /*[clinic input] _multiprocess.send handle: HANDLE buf: Py_buffer / [clinic start generated code]*/ static PyObject * _multiprocess_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf) /*[clinic end generated code: output=52d7df0519c596cb input=41dce742f98d2210]*/ { int ret, length; length = (int)Py_MIN(buf->len, INT_MAX); Py_BEGIN_ALLOW_THREADS ret = send((SOCKET) handle, 
buf->buf, length, 0); Py_END_ALLOW_THREADS if (ret < 0) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); return PyLong_FromLong(ret); } #endif /*[clinic input] _multiprocess.sem_unlink name: str / [clinic start generated code]*/ static PyObject * _multiprocess_sem_unlink_impl(PyObject *module, const char *name) /*[clinic end generated code: output=fcbfeb1ed255e647 input=bf939aff9564f1d5]*/ { return _PyMp_sem_unlink(name); } /* * Function table */ static PyMethodDef module_methods[] = { #ifdef MS_WINDOWS _MULTIPROCESSING_CLOSESOCKET_METHODDEF _MULTIPROCESSING_RECV_METHODDEF _MULTIPROCESSING_SEND_METHODDEF #endif #if !defined(POSIX_SEMAPHORES_NOT_ENABLED) && !defined(__ANDROID__) _MULTIPROCESSING_SEM_UNLINK_METHODDEF #endif {NULL} }; /* * Initialize */ static int multiprocess_exec(PyObject *module) { #ifdef HAVE_MP_SEMAPHORE PyTypeObject *semlock_type = (PyTypeObject *)PyType_FromModuleAndSpec( module, &_PyMp_SemLockType_spec, NULL); if (semlock_type == NULL) { return -1; } int rc = PyModule_AddType(module, semlock_type); Py_DECREF(semlock_type); if (rc < 0) { return -1; } PyObject *py_sem_value_max; /* Some systems define SEM_VALUE_MAX as an unsigned value that * causes it to be negative when used as an int (NetBSD). * * Issue #28152: Use (0) instead of 0 to fix a warning on dead code * when using clang -Wunreachable-code. */ if ((int)(SEM_VALUE_MAX) < (0)) { py_sem_value_max = PyLong_FromLong(INT_MAX); } else { py_sem_value_max = PyLong_FromLong(SEM_VALUE_MAX); } if (py_sem_value_max == NULL) { return -1; } if (PyDict_SetItemString(semlock_type->tp_dict, "SEM_VALUE_MAX", py_sem_value_max) < 0) { Py_DECREF(py_sem_value_max); return -1; } Py_DECREF(py_sem_value_max); #endif /* Add configuration macros */ PyObject *flags = PyDict_New(); if (!flags) { return -1; } #define ADD_FLAG(name) \ do { \ PyObject *value = PyLong_FromLong(name); \ if (value == NULL) { \ Py_DECREF(flags); \ return -1; \ } \ if (PyDict_SetItemString(flags, #name, value) < 0) { \ Py_DECREF(flags); \ Py_DECREF(value); \ return -1; \ } \ Py_DECREF(value); \ } while (0) #if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) ADD_FLAG(HAVE_SEM_OPEN); #endif #ifdef HAVE_SEM_TIMEDWAIT ADD_FLAG(HAVE_SEM_TIMEDWAIT); #endif #ifdef HAVE_BROKEN_SEM_GETVALUE ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE); #endif #ifdef HAVE_BROKEN_SEM_UNLINK ADD_FLAG(HAVE_BROKEN_SEM_UNLINK); #endif if (PyModule_Add(module, "flags", flags) < 0) { return -1; } return 0; } static PyModuleDef_Slot multiprocess_slots[] = { {Py_mod_exec, multiprocess_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {0, NULL} }; static struct PyModuleDef multiprocess_module = { PyModuleDef_HEAD_INIT, .m_name = "_multiprocess", .m_size = 0, .m_methods = module_methods, .m_slots = multiprocess_slots, }; PyMODINIT_FUNC PyInit__multiprocess(void) { return PyModuleDef_Init(&multiprocess_module); } uqfoundation-multiprocess-b3457a5/py3.13/Modules/_multiprocess/multiprocess.h000066400000000000000000000045661455552142400274720ustar00rootroot00000000000000#ifndef MULTIPROCESS_H #define MULTIPROCESS_H #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include "Python.h" #include "structmember.h" #include "pythread.h" #include "pycore_signal.h" // _PyOS_IsMainThread() #ifndef MS_WINDOWS # include // sysconf() #endif /* * Platform includes and definitions */ #ifdef MS_WINDOWS # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN # endif # include # include # include /* getpid() */ # ifdef Py_DEBUG # include # endif 
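/* Clarifying note: on Windows a native HANDLE stands in for the semaphore,
 * so SEM_HANDLE, SEM_VALUE_MAX and HAVE_MP_SEMAPHORE are defined directly
 * below; on POSIX builds (the #else branch) SEM_HANDLE is a typedef for
 * sem_t * and is only provided when sem_open() support was detected. */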
# define SEM_HANDLE HANDLE # define SEM_VALUE_MAX LONG_MAX # define HAVE_MP_SEMAPHORE #else # include /* O_CREAT and O_EXCL */ # if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) # define HAVE_MP_SEMAPHORE # include typedef sem_t *SEM_HANDLE; # endif #endif /* * Issue 3110 - Solaris does not define SEM_VALUE_MAX */ #ifndef SEM_VALUE_MAX #if defined(HAVE_SYSCONF) && defined(_SC_SEM_VALUE_MAX) # define SEM_VALUE_MAX sysconf(_SC_SEM_VALUE_MAX) #elif defined(_SEM_VALUE_MAX) # define SEM_VALUE_MAX _SEM_VALUE_MAX #elif defined(_POSIX_SEM_VALUE_MAX) # define SEM_VALUE_MAX _POSIX_SEM_VALUE_MAX #else # define SEM_VALUE_MAX INT_MAX #endif #endif /* * Format codes */ #if SIZEOF_VOID_P == SIZEOF_LONG # define F_POINTER "k" # define T_POINTER T_ULONG #elif SIZEOF_VOID_P == SIZEOF_LONG_LONG # define F_POINTER "K" # define T_POINTER T_ULONGLONG #else # error "can't find format code for unsigned integer of same size as void*" #endif #ifdef MS_WINDOWS # define F_HANDLE F_POINTER # define T_HANDLE T_POINTER # define F_SEM_HANDLE F_HANDLE # define T_SEM_HANDLE T_HANDLE #else # define F_HANDLE "i" # define T_HANDLE T_INT # define F_SEM_HANDLE F_POINTER # define T_SEM_HANDLE T_POINTER #endif /* * Error codes which can be returned by functions called without GIL */ #define MP_SUCCESS (0) #define MP_STANDARD_ERROR (-1) #define MP_MEMORY_ERROR (-1001) #define MP_SOCKET_ERROR (-1002) #define MP_EXCEPTION_HAS_BEEN_SET (-1003) PyObject *_PyMp_SetError(PyObject *Type, int num); /* * Externs - not all will really exist on all platforms */ extern PyType_Spec _PyMp_SemLockType_spec; extern PyObject *_PyMp_sem_unlink(const char *name); #endif /* MULTIPROCESS_H */ uqfoundation-multiprocess-b3457a5/py3.13/Modules/_multiprocess/posixshmem.c000066400000000000000000000063271455552142400271250ustar00rootroot00000000000000/* posixshmem - A Python extension that provides shm_open() and shm_unlink() */ #include "pyconfig.h" // Py_GIL_DISABLED #ifndef Py_GIL_DISABLED // Need limited C API version 3.12 for Py_MOD_PER_INTERPRETER_GIL_SUPPORTED #define Py_LIMITED_API 0x030c0000 #endif #include #include // EINTR #ifdef HAVE_SYS_MMAN_H # include // shm_open(), shm_unlink() #endif /*[clinic input] module _posixshmem [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=a416734e49164bf8]*/ /* * * Module-level functions & meta stuff * */ #ifdef HAVE_SHM_OPEN /*[clinic input] _posixshmem.shm_open -> int path: unicode flags: int mode: int = 0o777 # "shm_open(path, flags, mode=0o777)\n\n\ Open a shared memory object. Returns a file descriptor (integer). [clinic start generated code]*/ static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode) /*[clinic end generated code: output=8d110171a4fa20df input=e83b58fa802fac25]*/ { int fd; int async_err = 0; const char *name = PyUnicode_AsUTF8AndSize(path, NULL); if (name == NULL) { return -1; } do { Py_BEGIN_ALLOW_THREADS fd = shm_open(name, flags, mode); Py_END_ALLOW_THREADS } while (fd < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (fd < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return -1; } return fd; } #endif /* HAVE_SHM_OPEN */ #ifdef HAVE_SHM_UNLINK /*[clinic input] _posixshmem.shm_unlink path: unicode Remove a shared memory object (similar to unlink()). Remove a shared memory object name, and, once all processes have unmapped the object, de-allocates and destroys the contents of the associated memory region. 
[clinic start generated code]*/ static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path) /*[clinic end generated code: output=42f8b23d134b9ff5 input=8dc0f87143e3b300]*/ { int rv; int async_err = 0; const char *name = PyUnicode_AsUTF8AndSize(path, NULL); if (name == NULL) { return NULL; } do { Py_BEGIN_ALLOW_THREADS rv = shm_unlink(name); Py_END_ALLOW_THREADS } while (rv < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (rv < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return NULL; } Py_RETURN_NONE; } #endif /* HAVE_SHM_UNLINK */ #include "clinic/posixshmem.c.h" static PyMethodDef module_methods[ ] = { _POSIXSHMEM_SHM_OPEN_METHODDEF _POSIXSHMEM_SHM_UNLINK_METHODDEF {NULL} /* Sentinel */ }; static PyModuleDef_Slot module_slots[] = { {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {0, NULL} }; static struct PyModuleDef _posixshmemmodule = { PyModuleDef_HEAD_INIT, .m_name = "_posixshmem", .m_doc = "POSIX shared memory module", .m_size = 0, .m_methods = module_methods, .m_slots = module_slots, }; /* Module init function */ PyMODINIT_FUNC PyInit__posixshmem(void) { return PyModuleDef_Init(&_posixshmemmodule); } uqfoundation-multiprocess-b3457a5/py3.13/Modules/_multiprocess/semaphore.c000066400000000000000000000514231455552142400267110ustar00rootroot00000000000000/* * A type which wraps a semaphore * * semaphore.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocess.h" #ifdef HAVE_SYS_TIME_H # include // gettimeofday() #endif #ifdef HAVE_MP_SEMAPHORE enum { RECURSIVE_MUTEX, SEMAPHORE }; typedef struct { PyObject_HEAD SEM_HANDLE handle; unsigned long last_tid; int count; int maxvalue; int kind; char *name; } SemLockObject; /*[python input] class SEM_HANDLE_converter(CConverter): type = "SEM_HANDLE" format_unit = '"F_SEM_HANDLE"' [python start generated code]*/ /*[python end generated code: output=da39a3ee5e6b4b0d input=3e0ad43e482d8716]*/ /*[clinic input] module _multiprocess class _multiprocess.SemLock "SemLockObject *" "&_PyMp_SemLockType" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=935fb41b7d032599]*/ #include "clinic/semaphore.c.h" #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) #ifdef MS_WINDOWS /* * Windows definitions */ #define SEM_FAILED NULL #define SEM_CLEAR_ERROR() SetLastError(0) #define SEM_GET_LAST_ERROR() GetLastError() #define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL) #define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1) #define SEM_GETVALUE(sem, pval) _GetSemaphoreValue(sem, pval) #define SEM_UNLINK(name) 0 static int _GetSemaphoreValue(HANDLE handle, long *value) { long previous; switch (WaitForSingleObjectEx(handle, 0, FALSE)) { case WAIT_OBJECT_0: if (!ReleaseSemaphore(handle, 1, &previous)) return MP_STANDARD_ERROR; *value = previous + 1; return 0; case WAIT_TIMEOUT: *value = 0; return 0; default: return MP_STANDARD_ERROR; } } /*[clinic input] _multiprocess.SemLock.acquire block as blocking: bool = True timeout as timeout_obj: object = None Acquire the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj) /*[clinic end generated code: output=f9998f0b6b0b0872 input=e5b45f5cbb775166]*/ { double timeout; DWORD res, full_msecs, nhandles; HANDLE handles[2], sigint_event; /* calculate timeout */ if (!blocking) { full_msecs = 0; } else if (timeout_obj == Py_None) { full_msecs = INFINITE; } else { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; timeout *= 1000.0; /* convert to millisecs */ if (timeout < 0.0) { timeout = 0.0; } else if (timeout >= 0.5 * INFINITE) { /* 25 days */ PyErr_SetString(PyExc_OverflowError, "timeout is too large"); return NULL; } full_msecs = (DWORD)(timeout + 0.5); } /* check whether we already own the lock */ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } /* check whether we can acquire without releasing the GIL and blocking */ if (WaitForSingleObjectEx(self->handle, 0, FALSE) == WAIT_OBJECT_0) { self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; } /* prepare list of handles */ nhandles = 0; handles[nhandles++] = self->handle; if (_PyOS_IsMainThread()) { sigint_event = _PyOS_SigintEvent(); assert(sigint_event != NULL); handles[nhandles++] = sigint_event; } else { sigint_event = NULL; } /* do the wait */ Py_BEGIN_ALLOW_THREADS if (sigint_event != NULL) ResetEvent(sigint_event); res = WaitForMultipleObjectsEx(nhandles, handles, FALSE, full_msecs, FALSE); Py_END_ALLOW_THREADS /* handle result */ switch (res) { case WAIT_TIMEOUT: Py_RETURN_FALSE; case WAIT_OBJECT_0 + 0: self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; case WAIT_OBJECT_0 + 1: errno = EINTR; return PyErr_SetFromErrno(PyExc_OSError); case WAIT_FAILED: return PyErr_SetFromWindowsErr(0); default: PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or " "WaitForMultipleObjects() gave unrecognized " "value %u", res); return NULL; } } /*[clinic input] _multiprocess.SemLock.release Release the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_release_impl(SemLockObject *self) /*[clinic end generated code: output=b22f53ba96b0d1db input=ba7e63a961885d3d]*/ { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } if (!ReleaseSemaphore(self->handle, 1, NULL)) { if (GetLastError() == ERROR_TOO_MANY_POSTS) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } else { return PyErr_SetFromWindowsErr(0); } } --self->count; Py_RETURN_NONE; } #else /* !MS_WINDOWS */ /* * Unix definitions */ #define SEM_CLEAR_ERROR() #define SEM_GET_LAST_ERROR() 0 #define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val) #define SEM_CLOSE(sem) sem_close(sem) #define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval) #define SEM_UNLINK(name) sem_unlink(name) /* OS X 10.4 defines SEM_FAILED as -1 instead of (sem_t *)-1; this gives compiler warnings, and (potentially) undefined behaviour. 
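   Redefining it below as ((sem_t *)-1) keeps the SEM_FAILED pointer
   comparisons well-typed on Apple platforms.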
*/ #ifdef __APPLE__ # undef SEM_FAILED # define SEM_FAILED ((sem_t *)-1) #endif #ifndef HAVE_SEM_UNLINK # define sem_unlink(name) 0 #endif #ifndef HAVE_SEM_TIMEDWAIT # define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save) static int sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) { int res; unsigned long delay, difference; struct timeval now, tvdeadline, tvdelay; errno = 0; tvdeadline.tv_sec = deadline->tv_sec; tvdeadline.tv_usec = deadline->tv_nsec / 1000; for (delay = 0 ; ; delay += 1000) { /* poll */ if (sem_trywait(sem) == 0) return 0; else if (errno != EAGAIN) return MP_STANDARD_ERROR; /* get current time */ if (gettimeofday(&now, NULL) < 0) return MP_STANDARD_ERROR; /* check for timeout */ if (tvdeadline.tv_sec < now.tv_sec || (tvdeadline.tv_sec == now.tv_sec && tvdeadline.tv_usec <= now.tv_usec)) { errno = ETIMEDOUT; return MP_STANDARD_ERROR; } /* calculate how much time is left */ difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 + (tvdeadline.tv_usec - now.tv_usec); /* check delay not too long -- maximum is 20 msecs */ if (delay > 20000) delay = 20000; if (delay > difference) delay = difference; /* sleep */ tvdelay.tv_sec = delay / 1000000; tvdelay.tv_usec = delay % 1000000; if (select(0, NULL, NULL, NULL, &tvdelay) < 0) return MP_STANDARD_ERROR; /* check for signals */ Py_BLOCK_THREADS res = PyErr_CheckSignals(); Py_UNBLOCK_THREADS if (res) { errno = EINTR; return MP_EXCEPTION_HAS_BEEN_SET; } } } #endif /* !HAVE_SEM_TIMEDWAIT */ /*[clinic input] _multiprocess.SemLock.acquire block as blocking: bool = True timeout as timeout_obj: object = None Acquire the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj) /*[clinic end generated code: output=f9998f0b6b0b0872 input=e5b45f5cbb775166]*/ { int res, err = 0; struct timespec deadline = {0}; if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } int use_deadline = (timeout_obj != Py_None); if (use_deadline) { double timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) { return NULL; } if (timeout < 0.0) { timeout = 0.0; } struct timeval now; if (gettimeofday(&now, NULL) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } long sec = (long) timeout; long nsec = (long) (1e9 * (timeout - sec) + 0.5); deadline.tv_sec = now.tv_sec + sec; deadline.tv_nsec = now.tv_usec * 1000 + nsec; deadline.tv_sec += (deadline.tv_nsec / 1000000000); deadline.tv_nsec %= 1000000000; } /* Check whether we can acquire without releasing the GIL and blocking */ do { res = sem_trywait(self->handle); err = errno; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); errno = err; if (res < 0 && errno == EAGAIN && blocking) { /* Couldn't acquire immediately, need to block */ do { Py_BEGIN_ALLOW_THREADS if (!use_deadline) { res = sem_wait(self->handle); } else { res = sem_timedwait(self->handle, &deadline); } Py_END_ALLOW_THREADS err = errno; if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); } if (res < 0) { errno = err; if (errno == EAGAIN || errno == ETIMEDOUT) Py_RETURN_FALSE; else if (errno == EINTR) return NULL; else return PyErr_SetFromErrno(PyExc_OSError); } ++self->count; self->last_tid = PyThread_get_thread_ident(); Py_RETURN_TRUE; } /*[clinic input] _multiprocess.SemLock.release Release the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock_release_impl(SemLockObject *self) /*[clinic end generated code: output=b22f53ba96b0d1db input=ba7e63a961885d3d]*/ { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } else { #ifdef HAVE_BROKEN_SEM_GETVALUE /* We will only check properly the maxvalue == 1 case */ if (self->maxvalue == 1) { /* make sure that already locked */ if (sem_trywait(self->handle) < 0) { if (errno != EAGAIN) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } /* it is already locked as expected */ } else { /* it was not locked so undo wait and raise */ if (sem_post(self->handle) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } PyErr_SetString(PyExc_ValueError, "semaphore " "or lock released too many " "times"); return NULL; } } #else int sval; /* This check is not an absolute guarantee that the semaphore does not rise above maxvalue. */ if (sem_getvalue(self->handle, &sval) < 0) { return PyErr_SetFromErrno(PyExc_OSError); } else if (sval >= self->maxvalue) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } #endif } if (sem_post(self->handle) < 0) return PyErr_SetFromErrno(PyExc_OSError); --self->count; Py_RETURN_NONE; } #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject * newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, char *name) { SemLockObject *self = (SemLockObject *)type->tp_alloc(type, 0); if (!self) return NULL; self->handle = handle; self->kind = kind; self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; return (PyObject*)self; } /*[clinic input] @classmethod _multiprocess.SemLock.__new__ kind: int value: int maxvalue: int name: str unlink: bool [clinic start generated code]*/ static PyObject * _multiprocess_SemLock_impl(PyTypeObject *type, int kind, int value, int maxvalue, const char *name, int unlink) /*[clinic end generated code: output=30727e38f5f7577a input=fdaeb69814471c5b]*/ { SEM_HANDLE handle = SEM_FAILED; PyObject *result; char *name_copy = NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { PyErr_SetString(PyExc_ValueError, "unrecognized kind"); return NULL; } if (!unlink) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) { return PyErr_NoMemory(); } strcpy(name_copy, name); } SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; if (unlink && SEM_UNLINK(name) < 0) goto failure; result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; return result; failure: if (!PyErr_Occurred()) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); } if (handle != SEM_FAILED) SEM_CLOSE(handle); PyMem_Free(name_copy); return NULL; } /*[clinic input] @classmethod _multiprocess.SemLock._rebuild handle: SEM_HANDLE kind: int maxvalue: int name: str(accept={str, NoneType}) / [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, const char *name) /*[clinic end generated code: output=2aaee14f063f3bd9 input=f7040492ac6d9962]*/ { char *name_copy = NULL; if (name != NULL) { name_copy = PyMem_Malloc(strlen(name) + 1); if 
(name_copy == NULL) return PyErr_NoMemory(); strcpy(name_copy, name); } #ifndef MS_WINDOWS if (name != NULL) { handle = sem_open(name, 0); if (handle == SEM_FAILED) { PyErr_SetFromErrno(PyExc_OSError); PyMem_Free(name_copy); return NULL; } } #endif return newsemlockobject(type, handle, kind, maxvalue, name_copy); } static void semlock_dealloc(SemLockObject* self) { PyTypeObject *tp = Py_TYPE(self); PyObject_GC_UnTrack(self); if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); PyMem_Free(self->name); tp->tp_free(self); Py_DECREF(tp); } /*[clinic input] _multiprocess.SemLock._count Num of `acquire()`s minus num of `release()`s for this process. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__count_impl(SemLockObject *self) /*[clinic end generated code: output=5ba8213900e517bb input=36fc59b1cd1025ab]*/ { return PyLong_FromLong((long)self->count); } /*[clinic input] _multiprocess.SemLock._is_mine Whether the lock is owned by this thread. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__is_mine_impl(SemLockObject *self) /*[clinic end generated code: output=92dc98863f4303be input=a96664cb2f0093ba]*/ { /* only makes sense for a lock */ return PyBool_FromLong(ISMINE(self)); } /*[clinic input] _multiprocess.SemLock._get_value Get the value of the semaphore. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__get_value_impl(SemLockObject *self) /*[clinic end generated code: output=64bc1b89bda05e36 input=cb10f9a769836203]*/ { #ifdef HAVE_BROKEN_SEM_GETVALUE PyErr_SetNone(PyExc_NotImplementedError); return NULL; #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); /* some posix implementations use negative numbers to indicate the number of waiting threads */ if (sval < 0) sval = 0; return PyLong_FromLong((long)sval); #endif } /*[clinic input] _multiprocess.SemLock._is_zero Return whether semaphore has value zero. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__is_zero_impl(SemLockObject *self) /*[clinic end generated code: output=815d4c878c806ed7 input=294a446418d31347]*/ { #ifdef HAVE_BROKEN_SEM_GETVALUE if (sem_trywait(self->handle) < 0) { if (errno == EAGAIN) Py_RETURN_TRUE; return _PyMp_SetError(NULL, MP_STANDARD_ERROR); } else { if (sem_post(self->handle) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); Py_RETURN_FALSE; } #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); return PyBool_FromLong((long)sval == 0); #endif } /*[clinic input] _multiprocess.SemLock._after_fork Rezero the net acquisition count after fork(). [clinic start generated code]*/ static PyObject * _multiprocess_SemLock__after_fork_impl(SemLockObject *self) /*[clinic end generated code: output=718bb27914c6a6c1 input=190991008a76621e]*/ { self->count = 0; Py_RETURN_NONE; } /*[clinic input] _multiprocess.SemLock.__enter__ Enter the semaphore/lock. [clinic start generated code]*/ static PyObject * _multiprocess_SemLock___enter___impl(SemLockObject *self) /*[clinic end generated code: output=beeb2f07c858511f input=c5e27d594284690b]*/ { return _multiprocess_SemLock_acquire_impl(self, 1, Py_None); } /*[clinic input] _multiprocess.SemLock.__exit__ exc_type: object = None exc_value: object = None exc_tb: object = None / Exit the semaphore/lock. 
[clinic start generated code]*/ static PyObject * _multiprocess_SemLock___exit___impl(SemLockObject *self, PyObject *exc_type, PyObject *exc_value, PyObject *exc_tb) /*[clinic end generated code: output=3b37c1a9f8b91a03 input=7d644b64a89903f8]*/ { return _multiprocess_SemLock_release_impl(self); } static int semlock_traverse(SemLockObject *s, visitproc visit, void *arg) { Py_VISIT(Py_TYPE(s)); return 0; } /* * Semaphore methods */ static PyMethodDef semlock_methods[] = { _MULTIPROCESS_SEMLOCK_ACQUIRE_METHODDEF _MULTIPROCESS_SEMLOCK_RELEASE_METHODDEF _MULTIPROCESS_SEMLOCK___ENTER___METHODDEF _MULTIPROCESS_SEMLOCK___EXIT___METHODDEF _MULTIPROCESS_SEMLOCK__COUNT_METHODDEF _MULTIPROCESS_SEMLOCK__IS_MINE_METHODDEF _MULTIPROCESS_SEMLOCK__GET_VALUE_METHODDEF _MULTIPROCESS_SEMLOCK__IS_ZERO_METHODDEF _MULTIPROCESS_SEMLOCK__REBUILD_METHODDEF _MULTIPROCESS_SEMLOCK__AFTER_FORK_METHODDEF {NULL} }; /* * Member table */ static PyMemberDef semlock_members[] = { {"handle", T_SEM_HANDLE, offsetof(SemLockObject, handle), Py_READONLY, ""}, {"kind", Py_T_INT, offsetof(SemLockObject, kind), Py_READONLY, ""}, {"maxvalue", Py_T_INT, offsetof(SemLockObject, maxvalue), Py_READONLY, ""}, {"name", Py_T_STRING, offsetof(SemLockObject, name), Py_READONLY, ""}, {NULL} }; /* * Semaphore type */ static PyType_Slot _PyMp_SemLockType_slots[] = { {Py_tp_dealloc, semlock_dealloc}, {Py_tp_getattro, PyObject_GenericGetAttr}, {Py_tp_setattro, PyObject_GenericSetAttr}, {Py_tp_methods, semlock_methods}, {Py_tp_members, semlock_members}, {Py_tp_alloc, PyType_GenericAlloc}, {Py_tp_new, _multiprocess_SemLock}, {Py_tp_traverse, semlock_traverse}, {Py_tp_free, PyObject_GC_Del}, {Py_tp_doc, (void *)PyDoc_STR("Semaphore/Mutex type")}, {0, 0}, }; PyType_Spec _PyMp_SemLockType_spec = { .name = "_multiprocess.SemLock", .basicsize = sizeof(SemLockObject), .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_IMMUTABLETYPE), .slots = _PyMp_SemLockType_slots, }; /* * Function to unlink semaphore names */ PyObject * _PyMp_sem_unlink(const char *name) { if (SEM_UNLINK(name) < 0) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); return NULL; } Py_RETURN_NONE; } #endif // HAVE_MP_SEMAPHORE uqfoundation-multiprocess-b3457a5/py3.13/README_MODS000066400000000000000000001036561455552142400217720ustar00rootroot00000000000000cp -rf py3.12/examples . cp -rf py3.12/doc . cp -f py3.12/index.html . 
cp -rf py3.12/_multiprocess _multiprocess cp -rf Python-3.13.0a1/Modules/_multiprocessing Modules/_multiprocess cp -rf py3.12/multiprocess multiprocess # ---------------------------------------------------------------------- diff /Users/mmckerns/src/Python-3.13.0a1/Modules/_multiprocessing/semaphore.c Modules/_multiprocess/semaphore.c 10c10 < #include "multiprocessing.h" --- > #include "multiprocess.h" 39,40c39,40 < module _multiprocessing < class _multiprocessing.SemLock "SemLockObject *" "&_PyMp_SemLockType" --- > module _multiprocess > class _multiprocess.SemLock "SemLockObject *" "&_PyMp_SemLockType" 84c84 < _multiprocessing.SemLock.acquire --- > _multiprocess.SemLock.acquire 93c93 < _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, --- > _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, 175c175 < _multiprocessing.SemLock.release --- > _multiprocess.SemLock.release 181c181 < _multiprocessing_SemLock_release_impl(SemLockObject *self) --- > _multiprocess_SemLock_release_impl(SemLockObject *self) 300c300 < _multiprocessing.SemLock.acquire --- > _multiprocess.SemLock.acquire 309c309 < _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, --- > _multiprocess_SemLock_acquire_impl(SemLockObject *self, int blocking, 385c385 < _multiprocessing.SemLock.release --- > _multiprocess.SemLock.release 391c391 < _multiprocessing_SemLock_release_impl(SemLockObject *self) --- > _multiprocess_SemLock_release_impl(SemLockObject *self) 475c475 < _multiprocessing.SemLock.__new__ --- > _multiprocess.SemLock.__new__ 486c486 < _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, --- > _multiprocess_SemLock_impl(PyTypeObject *type, int kind, int value, 534c534 < _multiprocessing.SemLock._rebuild --- > _multiprocess.SemLock._rebuild 545c545 < _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, --- > _multiprocess_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, 586c586 < _multiprocessing.SemLock._count --- > _multiprocess.SemLock._count 592c592 < _multiprocessing_SemLock__count_impl(SemLockObject *self) --- > _multiprocess_SemLock__count_impl(SemLockObject *self) 599c599 < _multiprocessing.SemLock._is_mine --- > _multiprocess.SemLock._is_mine 605c605 < _multiprocessing_SemLock__is_mine_impl(SemLockObject *self) --- > _multiprocess_SemLock__is_mine_impl(SemLockObject *self) 613c613 < _multiprocessing.SemLock._get_value --- > _multiprocess.SemLock._get_value 619c619 < _multiprocessing_SemLock__get_value_impl(SemLockObject *self) --- > _multiprocess_SemLock__get_value_impl(SemLockObject *self) 638c638 < _multiprocessing.SemLock._is_zero --- > _multiprocess.SemLock._is_zero 644c644 < _multiprocessing_SemLock__is_zero_impl(SemLockObject *self) --- > _multiprocess_SemLock__is_zero_impl(SemLockObject *self) 666c666 < _multiprocessing.SemLock._after_fork --- > _multiprocess.SemLock._after_fork 672c672 < _multiprocessing_SemLock__after_fork_impl(SemLockObject *self) --- > _multiprocess_SemLock__after_fork_impl(SemLockObject *self) 680c680 < _multiprocessing.SemLock.__enter__ --- > _multiprocess.SemLock.__enter__ 686c686 < _multiprocessing_SemLock___enter___impl(SemLockObject *self) --- > _multiprocess_SemLock___enter___impl(SemLockObject *self) 689c689 < return _multiprocessing_SemLock_acquire_impl(self, 1, Py_None); --- > return _multiprocess_SemLock_acquire_impl(self, 1, Py_None); 693c693 < _multiprocessing.SemLock.__exit__ --- > _multiprocess.SemLock.__exit__ 704c704 < 
_multiprocessing_SemLock___exit___impl(SemLockObject *self, --- > _multiprocess_SemLock___exit___impl(SemLockObject *self, 709c709 < return _multiprocessing_SemLock_release_impl(self); --- > return _multiprocess_SemLock_release_impl(self); 724,733c724,733 < _MULTIPROCESSING_SEMLOCK_ACQUIRE_METHODDEF < _MULTIPROCESSING_SEMLOCK_RELEASE_METHODDEF < _MULTIPROCESSING_SEMLOCK___ENTER___METHODDEF < _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF < _MULTIPROCESSING_SEMLOCK__COUNT_METHODDEF < _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF < _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF < _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF < _MULTIPROCESSING_SEMLOCK__REBUILD_METHODDEF < _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF --- > _MULTIPROCESS_SEMLOCK_ACQUIRE_METHODDEF > _MULTIPROCESS_SEMLOCK_RELEASE_METHODDEF > _MULTIPROCESS_SEMLOCK___ENTER___METHODDEF > _MULTIPROCESS_SEMLOCK___EXIT___METHODDEF > _MULTIPROCESS_SEMLOCK__COUNT_METHODDEF > _MULTIPROCESS_SEMLOCK__IS_MINE_METHODDEF > _MULTIPROCESS_SEMLOCK__GET_VALUE_METHODDEF > _MULTIPROCESS_SEMLOCK__IS_ZERO_METHODDEF > _MULTIPROCESS_SEMLOCK__REBUILD_METHODDEF > _MULTIPROCESS_SEMLOCK__AFTER_FORK_METHODDEF 764c764 < {Py_tp_new, _multiprocessing_SemLock}, --- > {Py_tp_new, _multiprocess_SemLock}, 772c772 < .name = "_multiprocessing.SemLock", --- > .name = "_multiprocess.SemLock", diff ~/src/Python-3.13.0a1/Modules/_multiprocessing/multiprocessing.h Modules/_multiprocess/multiprocess.h 1,2c1,2 < #ifndef MULTIPROCESSING_H < #define MULTIPROCESSING_H --- > #ifndef MULTIPROCESS_H > #define MULTIPROCESS_H 104c104 < #endif /* MULTIPROCESSING_H */ --- > #endif /* MULTIPROCESS_H */ diff ~/src/Python-3.13.0a1/Modules/_multiprocessing/multiprocessing.c Modules/_multiprocess/multiprocess.c 2c2 < * Extension module used by multiprocessing package --- > * Extension module used by multiprocess package 4c4 < * multiprocessing.c --- > * multiprocess.c 10c10 < #include "multiprocessing.h" --- > #include "multiprocess.h" 30c30 < module _multiprocessing --- > module _multiprocess 77c77 < _multiprocessing.closesocket --- > _multiprocess.closesocket 85c85 < _multiprocessing_closesocket_impl(PyObject *module, HANDLE handle) --- > _multiprocess_closesocket_impl(PyObject *module, HANDLE handle) 100c100 < _multiprocessing.recv --- > _multiprocess.recv 109c109 < _multiprocessing_recv_impl(PyObject *module, HANDLE handle, int size) --- > _multiprocess_recv_impl(PyObject *module, HANDLE handle, int size) 132c132 < _multiprocessing.send --- > _multiprocess.send 141c141 < _multiprocessing_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf) --- > _multiprocess_send_impl(PyObject *module, HANDLE handle, Py_buffer *buf) 160c160 < _multiprocessing.sem_unlink --- > _multiprocess.sem_unlink 168c168 < _multiprocessing_sem_unlink_impl(PyObject *module, const char *name) --- > _multiprocess_sem_unlink_impl(PyObject *module, const char *name) 196c196 < multiprocessing_exec(PyObject *module) --- > multiprocess_exec(PyObject *module) 277,278c277,278 < static PyModuleDef_Slot multiprocessing_slots[] = { < {Py_mod_exec, multiprocessing_exec}, --- > static PyModuleDef_Slot multiprocess_slots[] = { > {Py_mod_exec, multiprocess_exec}, 283c283 < static struct PyModuleDef multiprocessing_module = { --- > static struct PyModuleDef multiprocess_module = { 285c285 < .m_name = "_multiprocessing", --- > .m_name = "_multiprocess", 288c288 < .m_slots = multiprocessing_slots, --- > .m_slots = multiprocess_slots, 292c292 < PyInit__multiprocessing(void) --- > PyInit__multiprocess(void) 294c294 < return 
PyModuleDef_Init(&multiprocessing_module); --- > return PyModuleDef_Init(&multiprocess_module); # ---------------------------------------------------------------------- diff Python-3.12.0rc3/Lib/multiprocessing/connection.py Python-3.13.0a1/Lib/multiprocessing/connection.py 11a12 > import errno 273a275 > _send_ov = None 275a278,281 > ov = self._send_ov > if ov is not None: > # Interrupt WaitForMultipleObjects() in _send_bytes() > ov.cancel() 278a285,288 > if self._send_ov is not None: > # A connection should only be used by a single thread > raise ValueError("concurrent send_bytes() calls " > "are not supported") 279a290 > self._send_ov = ov 288a300 > self._send_ov = None 289a302,306 > if err == _winapi.ERROR_OPERATION_ABORTED: > # close() was called by another thread while > # WaitForMultipleObjects() was waiting for the overlapped > # operation. > raise OSError(errno.EPIPE, "handle is closed") diff Python-3.12.0rc3/Lib/multiprocessing/managers.py Python-3.13.0a1/Lib/multiprocessing/managers.py 93c93,96 < raise convert_to_error(kind, result) --- > try: > raise convert_to_error(kind, result) > finally: > del result # break reference cycle 836c839,842 < raise convert_to_error(kind, result) --- > try: > raise convert_to_error(kind, result) > finally: > del result # break reference cycle diff Python-3.12.0rc3/Lib/multiprocessing/pool.py Python-3.13.0a1/Lib/multiprocessing/pool.py 203c203 < processes = os.cpu_count() or 1 --- > processes = os.process_cpu_count() or 1 diff Python-3.12.0rc3/Lib/multiprocessing/popen_spawn_win32.py Python-3.13.0a1/Lib/multiprocessing/popen_spawn_win32.py 16a17 > # Exit code used by Popen.terminate() 125,126c126,130 < except OSError: < if self.wait(timeout=1.0) is None: --- > except PermissionError: > # ERROR_ACCESS_DENIED (winerror 5) is received when the > # process already died. > code = _winapi.GetExitCodeProcess(int(self._handle)) > if code == _winapi.STILL_ACTIVE: 127a132,134 > self.returncode = code > else: > self.returncode = -signal.SIGTERM diff Python-3.12.0rc3/Lib/multiprocessing/queues.py Python-3.13.0a1/Lib/multiprocessing/queues.py 160a161,174 > def _terminate_broken(self): > # Close a Queue on error. > > # gh-94777: Prevent queue writing to a pipe which is no longer read. > self._reader.close() > > # gh-107219: Close the connection writer which can unblock > # Queue._feed() if it was stuck in send_bytes(). > if sys.platform == 'win32': > self._writer.close() > > self.close() > self.join_thread() > 172c186,187 < name='QueueFeederThread' --- > name='QueueFeederThread', > daemon=True, 174d188 < self._thread.daemon = True 176,178c190,198 < debug('doing self._thread.start()') < self._thread.start() < debug('... done self._thread.start()') --- > try: > debug('doing self._thread.start()') > self._thread.start() > debug('... done self._thread.start()') > except: > # gh-109047: During Python finalization, creating a thread > # can fail with RuntimeError. > self._thread = None > raise diff Python-3.12.0rc3/Lib/multiprocessing/resource_tracker.py Python-3.13.0a1/Lib/multiprocessing/resource_tracker.py 53a54,57 > class ReentrantCallError(RuntimeError): > pass > > 57c61 < self._lock = threading.Lock() --- > self._lock = threading.RLock() 60a65,72 > def _reentrant_call_error(self): > # gh-109629: this happens if an explicit call to the ResourceTracker > # gets interrupted by a garbage collection, invoking a finalizer (*) > # that itself calls back into ResourceTracker. 
> # (*) for example the SemLock finalizer > raise ReentrantCallError( > "Reentrant call into the multiprocessing resource tracker") > 62a75,78 > # This should not happen (_stop() isn't called by a finalizer) > # but we check for it anyway. > if self._lock._recursion_count() > 1: > return self._reentrant_call_error() 83a100,102 > if self._lock._recursion_count() > 1: > # The code below is certainly not reentrant-safe, so bail out > return self._reentrant_call_error() 162c181,191 < self.ensure_running() --- > try: > self.ensure_running() > except ReentrantCallError: > # The code below might or might not work, depending on whether > # the resource tracker was already running and still alive. > # Better warn the user. > # (XXX is warnings.warn itself reentrant-safe? :-) > warnings.warn( > f"ResourceTracker called reentrantly for resource cleanup, " > f"which is unsupported. " > f"The {rtype} object {name!r} might leak.") 178a208 > 224,226c254,257 < warnings.warn('resource_tracker: There appear to be %d ' < 'leaked %s objects to clean up at shutdown' % < (len(rtype_cache), rtype)) --- > warnings.warn( > f'resource_tracker: There appear to be {len(rtype_cache)} ' > f'leaked {rtype} objects to clean up at shutdown: {rtype_cache}' > ) diff Python-3.12.0rc3/Lib/multiprocessing/util.py Python-3.13.0a1/Lib/multiprocessing/util.py 67,68c67 < logging._acquireLock() < try: --- > with logging._lock: 82,84d80 < finally: < logging._releaseLock() < # ---------------------------------------------------------------------- diff Python-3.12.0rc3/Lib/test/_test_multiprocessing.py Python-3.13.0a1/Lib/test/_test_multiprocessing.py 53c53 < from multiprocessing.connection import wait, AuthenticationError --- > from multiprocessing.connection import wait 81,82c81,82 < if support.check_sanitizer(address=True): < # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to --- > if support.HAVE_ASAN_FORK_BUG: > # gh-89363: Skip multiprocessing tests if Python is built with ASAN to 84c84,89 < raise unittest.SkipTest("libasan has a pthread_create() dead lock") --- > raise unittest.SkipTest("libasan has a pthread_create() dead lock related to thread+fork") > > > # gh-110666: Tolerate a difference of 100 ms when comparing timings > # (clock resolution) > CLOCK_RES = 0.100 560,561c565 < if os.name != 'nt': < self.assertEqual(exitcode, -signal.SIGTERM) --- > self.assertEqual(exitcode, -signal.SIGTERM) 566a571,572 > else: > self.assertEqual(exitcode, -signal.SIGTERM) 1653c1659 < expected = 0.1 --- > expected = 0.100 1657,1658c1663 < # borrow logic in assertTimeout() from test/lock_tests.py < if not result and expected * 0.6 < dt < expected * 10.0: --- > if not result and (expected - CLOCK_RES) <= dt: 1677c1682 < time.sleep(0.01) --- > time.sleep(0.010) 2576c2581 < res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) --- > res = self.pool.apply_async(sqr, (6, TIMEOUT2 + support.SHORT_TIMEOUT)) 2680,2682c2685,2687 < result = self.pool.map_async( < time.sleep, [0.1 for i in range(10000)], chunksize=1 < ) --- > # Simulate slow tasks which take "forever" to complete > args = [support.LONG_TIMEOUT for i in range(10_000)] > result = self.pool.map_async(time.sleep, args, chunksize=1) 2684,2687c2689 < join = TimingWrapper(self.pool.join) < join() < # Sanity check the pool didn't wait for all tasks to finish < self.assertLess(join.elapsed, 2.0) --- > self.pool.join() 3153a3156,3193 > > class FakeConnection: > def send(self, payload): > pass > > def recv(self): > return '#ERROR', pyqueue.Empty() > > class 
TestManagerExceptions(unittest.TestCase): > # Issue 106558: Manager exceptions avoids creating cyclic references. > def setUp(self): > self.mgr = multiprocessing.Manager() > > def tearDown(self): > self.mgr.shutdown() > self.mgr.join() > > def test_queue_get(self): > queue = self.mgr.Queue() > if gc.isenabled(): > gc.disable() > self.addCleanup(gc.enable) > try: > queue.get_nowait() > except pyqueue.Empty as e: > wr = weakref.ref(e) > self.assertEqual(wr(), None) > > def test_dispatch(self): > if gc.isenabled(): > gc.disable() > self.addCleanup(gc.enable) > try: > multiprocessing.managers.dispatch(FakeConnection(), None, None) > except pyqueue.Empty as e: > wr = weakref.ref(e) > self.assertEqual(wr(), None) > 4872c4912 < time.sleep(random.random()*0.1) --- > time.sleep(random.random() * 0.100) 4912c4952 < time.sleep(random.random()*0.1) --- > time.sleep(random.random() * 0.100) 4961c5001 < expected = 5 --- > timeout = 5.0 # seconds 4965c5005 < res = wait([a, b], expected) --- > res = wait([a, b], timeout) 4969,4970c5009 < self.assertLess(delta, expected * 2) < self.assertGreater(delta, expected * 0.5) --- > self.assertGreater(delta, timeout - CLOCK_RES) 4973,4974d5011 < < start = time.monotonic() 4976,4977d5012 < delta = time.monotonic() - start < 4979d5013 < self.assertLess(delta, 0.4) 5437c5471,5473 < self.assertEqual(results, [2, 1]) --- > # gh-109706: queue.put(1) can write into the queue before queue.put(2), > # there is no synchronization in the test. > self.assertSetEqual(set(results), set([2, 1])) # ---------------------------------------------------------------------- diff Python-3.13.0a1/Modules/_multiprocessing/clinic/posixshmem.c.h Python-3.13.0a2/Modules/_multiprocessing/clinic/posixshmem.c.h 5,9d4 < #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) < # include "pycore_gc.h" // PyGC_Head < # include "pycore_runtime.h" // _Py_ID() < #endif < 19c14 < {"shm_open", _PyCFunction_CAST(_posixshmem_shm_open), METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_open__doc__}, --- > {"shm_open", (PyCFunction)(void(*)(void))_posixshmem_shm_open, METH_VARARGS|METH_KEYWORDS, _posixshmem_shm_open__doc__}, 26c21 < _posixshmem_shm_open(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) --- > _posixshmem_shm_open(PyObject *module, PyObject *args, PyObject *kwargs) 29,55c24 < #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) < < #define NUM_KEYWORDS 3 < static struct { < PyGC_Head _this_is_not_used; < PyObject_VAR_HEAD < PyObject *ob_item[NUM_KEYWORDS]; < } _kwtuple = { < .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) < .ob_item = { &_Py_ID(path), &_Py_ID(flags), &_Py_ID(mode), }, < }; < #undef NUM_KEYWORDS < #define KWTUPLE (&_kwtuple.ob_base.ob_base) < < #else // !Py_BUILD_CORE < # define KWTUPLE NULL < #endif // !Py_BUILD_CORE < < static const char * const _keywords[] = {"path", "flags", "mode", NULL}; < static _PyArg_Parser _parser = { < .keywords = _keywords, < .fname = "shm_open", < .kwtuple = KWTUPLE, < }; < #undef KWTUPLE < PyObject *argsbuf[3]; < Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 2; --- > static char *_keywords[] = {"path", "flags", "mode", NULL}; 61,78c30,31 < args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 2, 3, 0, argsbuf); < if (!args) { < goto exit; < } < if (!PyUnicode_Check(args[0])) { < _PyArg_BadArgument("shm_open", "argument 'path'", "str", args[0]); < goto exit; < } < path = args[0]; < flags = PyLong_AsInt(args[1]); < if (flags == -1 && PyErr_Occurred()) { < goto exit; < } < if (!noptargs) { < goto skip_optional_pos; < } < mode = PyLong_AsInt(args[2]); < if (mode == -1 && PyErr_Occurred()) { --- > if (!PyArg_ParseTupleAndKeywords(args, kwargs, "Ui|i:shm_open", _keywords, > &path, &flags, &mode)) 80,81d32 < } < skip_optional_pos: 107c58 < {"shm_unlink", _PyCFunction_CAST(_posixshmem_shm_unlink), METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_unlink__doc__}, --- > {"shm_unlink", (PyCFunction)(void(*)(void))_posixshmem_shm_unlink, METH_VARARGS|METH_KEYWORDS, _posixshmem_shm_unlink__doc__}, 113c64 < _posixshmem_shm_unlink(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) --- > _posixshmem_shm_unlink(PyObject *module, PyObject *args, PyObject *kwargs) 116,141c67 < #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) < < #define NUM_KEYWORDS 1 < static struct { < PyGC_Head _this_is_not_used; < PyObject_VAR_HEAD < PyObject *ob_item[NUM_KEYWORDS]; < } _kwtuple = { < .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) < .ob_item = { &_Py_ID(path), }, < }; < #undef NUM_KEYWORDS < #define KWTUPLE (&_kwtuple.ob_base.ob_base) < < #else // !Py_BUILD_CORE < # define KWTUPLE NULL < #endif // !Py_BUILD_CORE < < static const char * const _keywords[] = {"path", NULL}; < static _PyArg_Parser _parser = { < .keywords = _keywords, < .fname = "shm_unlink", < .kwtuple = KWTUPLE, < }; < #undef KWTUPLE < PyObject *argsbuf[1]; --- > static char *_keywords[] = {"path", NULL}; 144,145c70,71 < args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf); < if (!args) { --- > if (!PyArg_ParseTupleAndKeywords(args, kwargs, "U:shm_unlink", _keywords, > &path)) 147,152d72 < } < if (!PyUnicode_Check(args[0])) { < _PyArg_BadArgument("shm_unlink", "argument 'path'", "str", args[0]); < goto exit; < } < path = args[0]; 168c88 < /*[clinic end generated code: output=2f356903a281d857 input=a9049054013a1b77]*/ --- > /*[clinic end generated code: output=be0661dbed83ea23 input=a9049054013a1b77]*/ diff Python-3.13.0a1/Modules/_multiprocessing/clinic/semaphore.c.h Python-3.13.0a2/Modules/_multiprocessing/clinic/semaphore.c.h 8a9 > #include "pycore_modsupport.h" // _PyArg_UnpackKeywords() 544c545 < /*[clinic end generated code: output=e8ea65f8cba8e173 input=a9049054013a1b77]*/ --- > /*[clinic end generated code: output=d57992037e6770b6 input=a9049054013a1b77]*/ diff Python-3.13.0a1/Modules/_multiprocessing/clinic/multiprocessing.c.h Python-3.13.0a2/Modules/_multiprocessing/clinic/multiprocessing.c.h 4a5,6 > #include "pycore_modsupport.h" // _PyArg_CheckPositional() > 105,108d106 < if (!PyBuffer_IsContiguous(&buf, 'C')) { < _PyArg_BadArgument("send", "argument 2", "contiguous buffer", args[1]); < goto exit; < } 169c167 < /*[clinic end generated code: output=8b91c020d4353cc5 input=a9049054013a1b77]*/ --- > /*[clinic end generated code: output=73b4cb8428d816da input=a9049054013a1b77]*/ # ---------------------------------------------------------------------- diff Python-3.13.0a1/Modules/_multiprocessing/posixshmem.c Python-3.13.0a2/Modules/_multiprocessing/posixshmem.c 4a5,11 > #include 
"pyconfig.h" // Py_GIL_DISABLED > > #ifndef Py_GIL_DISABLED > // Need limited C API version 3.12 for Py_MOD_PER_INTERPRETER_GIL_SUPPORTED > #define Py_LIMITED_API 0x030c0000 > #endif > 7c14 < // for shm_open() and shm_unlink() --- > #include // EINTR 9c16 < #include --- > # include // shm_open(), shm_unlink() 11a19 > 43c51 < const char *name = PyUnicode_AsUTF8(path); --- > const char *name = PyUnicode_AsUTF8AndSize(path, NULL); 82c90 < const char *name = PyUnicode_AsUTF8(path); --- > const char *name = PyUnicode_AsUTF8AndSize(path, NULL); # ---------------------------------------------------------------------- diff Python-3.13.0a1/Lib/multiprocessing/managers.py Python-3.13.0a2/Lib/multiprocessing/managers.py 1167a1168 > __class_getitem__ = classmethod(types.GenericAlias) 1169c1170,1171 < DictProxy = MakeProxyType('DictProxy', ( --- > > _BaseDictProxy = MakeProxyType('DictProxy', ( 1174c1176 < DictProxy._method_to_typeid_ = { --- > _BaseDictProxy._method_to_typeid_ = { 1176a1179,1180 > class DictProxy(_BaseDictProxy): > __class_getitem__ = classmethod(types.GenericAlias) # ---------------------------------------------------------------------- diff Python-3.13.0a1/Lib/test/_test_multiprocessing.py Python-3.13.0a2/Lib/test/_test_multiprocessing.py 2441,2442c2441,2445 < def sqr(x, wait=0.0): < time.sleep(wait) --- > def sqr(x, wait=0.0, event=None): > if event is None: > time.sleep(wait) > else: > event.wait(wait) 2581,2584c2584,2595 < res = self.pool.apply_async(sqr, (6, TIMEOUT2 + support.SHORT_TIMEOUT)) < get = TimingWrapper(res.get) < self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) < self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) --- > p = self.Pool(3) > try: > event = threading.Event() if self.TYPE == 'threads' else None > res = p.apply_async(sqr, (6, TIMEOUT2 + support.SHORT_TIMEOUT, event)) > get = TimingWrapper(res.get) > self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) > self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) > finally: > if event is not None: > event.set() > p.terminate() > p.join() 2684a2696,2698 > if self.TYPE == 'threads': > self.skipTest("Threads cannot be terminated") > 2685a2700 > p = self.Pool(3) 2687,2689c2702,2704 < result = self.pool.map_async(time.sleep, args, chunksize=1) < self.pool.terminate() < self.pool.join() --- > result = p.map_async(time.sleep, args, chunksize=1) > p.terminate() > p.join() # ---------------------------------------------------------------------- diff Python-3.13.0a2/Lib/multiprocessing/managers.py Python-3.13.0a3/Lib/multiprocessing/managers.py 159c159 < self.listener = Listener(address=address, backlog=16) --- > self.listener = Listener(address=address, backlog=128) diff Python-3.13.0a2/Lib/multiprocessing/popen_spawn_win32.py Python-3.13.0a3/Lib/multiprocessing/popen_spawn_win32.py 104,115c104,117 < if self.returncode is None: < if timeout is None: < msecs = _winapi.INFINITE < else: < msecs = max(0, int(timeout * 1000 + 0.5)) < < res = _winapi.WaitForSingleObject(int(self._handle), msecs) < if res == _winapi.WAIT_OBJECT_0: < code = _winapi.GetExitCodeProcess(self._handle) < if code == TERMINATE: < code = -signal.SIGTERM < self.returncode = code --- > if self.returncode is not None: > return self.returncode > > if timeout is None: > msecs = _winapi.INFINITE > else: > msecs = max(0, int(timeout * 1000 + 0.5)) > > res = _winapi.WaitForSingleObject(int(self._handle), msecs) > if res == _winapi.WAIT_OBJECT_0: > code = _winapi.GetExitCodeProcess(self._handle) > if code == TERMINATE: > code 
= -signal.SIGTERM > self.returncode = code 123,134c125,140 < if self.returncode is None: < try: < _winapi.TerminateProcess(int(self._handle), TERMINATE) < except PermissionError: < # ERROR_ACCESS_DENIED (winerror 5) is received when the < # process already died. < code = _winapi.GetExitCodeProcess(int(self._handle)) < if code == _winapi.STILL_ACTIVE: < raise < self.returncode = code < else: < self.returncode = -signal.SIGTERM --- > if self.returncode is not None: > return > > try: > _winapi.TerminateProcess(int(self._handle), TERMINATE) > except PermissionError: > # ERROR_ACCESS_DENIED (winerror 5) is received when the > # process already died. > code = _winapi.GetExitCodeProcess(int(self._handle)) > if code == _winapi.STILL_ACTIVE: > raise > > # gh-113009: Don't set self.returncode. Even if GetExitCodeProcess() > # returns an exit code different than STILL_ACTIVE, the process can > # still be running. Only set self.returncode once WaitForSingleObject() > # returns WAIT_OBJECT_0 in wait(). diff Python-3.13.0a2/Lib/multiprocessing/resource_sharer.py Python-3.13.0a3/Lib/multiprocessing/resource_sharer.py 126c126 < self._listener = Listener(authkey=process.current_process().authkey) --- > self._listener = Listener(authkey=process.current_process().authkey, backlog=128) diff Python-3.13.0a2/Lib/multiprocessing/shared_memory.py Python-3.13.0a3/Lib/multiprocessing/shared_memory.py 73a74 > _track = True 75c76 < def __init__(self, name=None, create=False, size=0): --- > def __init__(self, name=None, create=False, size=0, *, track=True): 84a86 > self._track = track 119,120c121,122 < < resource_tracker.register(self._name, "shared_memory") --- > if self._track: > resource_tracker.register(self._name, "shared_memory") 239,241c241,250 < In order to ensure proper cleanup of resources, unlink should be < called once (and only once) across all processes which have access < to the shared memory block.""" --- > Unlink should be called once (and only once) across all handles > which have access to the shared memory block, even if these > handles belong to different processes. Closing and unlinking may > happen in any order, but trying to access data inside a shared > memory block after unlinking may result in memory errors, > depending on platform. > > This method has no effect on Windows, where the only way to > delete a shared memory block is to close all handles.""" > 244c253,254 < resource_tracker.unregister(self._name, "shared_memory") --- > if self._track: > resource_tracker.unregister(self._name, "shared_memory") diff Python-3.13.0a2/Lib/multiprocessing/util.py Python-3.13.0a3/Lib/multiprocessing/util.py 46c46 < _logger.log(SUBDEBUG, msg, *args) --- > _logger.log(SUBDEBUG, msg, *args, stacklevel=2) 50c50 < _logger.log(DEBUG, msg, *args) --- > _logger.log(DEBUG, msg, *args, stacklevel=2) 54c54 < _logger.log(INFO, msg, *args) --- > _logger.log(INFO, msg, *args, stacklevel=2) 58c58 < _logger.log(SUBWARNING, msg, *args) --- > _logger.log(SUBWARNING, msg, *args, stacklevel=2) # ---------------------------------------------------------------------- diff Python-3.13.0a2/Lib/test/_test_multiprocessing.py Python-3.13.0a3/Lib/test/_test_multiprocessing.py 4457a4458,4510 > @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") > def test_shared_memory_untracking(self): > # gh-82300: When a separate Python process accesses shared memory > # with track=False, it must not cause the memory to be deleted > # when terminating. 
> cmd = '''if 1: > import sys > from multiprocessing.shared_memory import SharedMemory > mem = SharedMemory(create=False, name=sys.argv[1], track=False) > mem.close() > ''' > mem = shared_memory.SharedMemory(create=True, size=10) > # The resource tracker shares pipes with the subprocess, and so > # err existing means that the tracker process has terminated now. > try: > rc, out, err = script_helper.assert_python_ok("-c", cmd, mem.name) > self.assertNotIn(b"resource_tracker", err) > self.assertEqual(rc, 0) > mem2 = shared_memory.SharedMemory(create=False, name=mem.name) > mem2.close() > finally: > try: > mem.unlink() > except OSError: > pass > mem.close() > > @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") > def test_shared_memory_tracking(self): > # gh-82300: When a separate Python process accesses shared memory > # with track=True, it must cause the memory to be deleted when > # terminating. > cmd = '''if 1: > import sys > from multiprocessing.shared_memory import SharedMemory > mem = SharedMemory(create=False, name=sys.argv[1], track=True) > mem.close() > ''' > mem = shared_memory.SharedMemory(create=True, size=10) > try: > rc, out, err = script_helper.assert_python_ok("-c", cmd, mem.name) > self.assertEqual(rc, 0) > self.assertIn( > b"resource_tracker: There appear to be 1 leaked " > b"shared_memory objects to clean up at shutdown", err) > finally: > try: > mem.unlink() > except OSError: > pass > resource_tracker.unregister(mem._name, "shared_memory") > mem.close() > 4673a4727,4749 > def test_filename(self): > logger = multiprocessing.get_logger() > original_level = logger.level > try: > logger.setLevel(util.DEBUG) > stream = io.StringIO() > handler = logging.StreamHandler(stream) > logging_format = '[%(levelname)s] [%(filename)s] %(message)s' > handler.setFormatter(logging.Formatter(logging_format)) > logger.addHandler(handler) > logger.info('1') > util.info('2') > logger.debug('3') > filename = os.path.basename(__file__) > log_record = stream.getvalue() > self.assertIn(f'[INFO] [{filename}] 1', log_record) > self.assertIn(f'[INFO] [{filename}] 2', log_record) > self.assertIn(f'[DEBUG] [{filename}] 3', log_record) > finally: > logger.setLevel(original_level) > logger.removeHandler(handler) > handler.close() > uqfoundation-multiprocess-b3457a5/py3.13/_multprocess/000077500000000000000000000000001455552142400227745ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/_multprocess/__init__.py000066400000000000000000000005011455552142400251010ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE from _multiprocessing import * uqfoundation-multiprocess-b3457a5/py3.13/doc/000077500000000000000000000000001455552142400210225ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/doc/CHANGES.html000066400000000000000000001133431455552142400227650ustar00rootroot00000000000000 Changelog for processing

Changelog for processing

Changes in 0.52

  • In versions 0.50 and 0.51, Lock.release() on Mac OSX would fail with OSError(errno.ENOSYS, "[Errno 78] Function not implemented"). This appears to be because sem_getvalue() has not been implemented on Mac OSX.

    Now sem_getvalue() is no longer needed. Unfortunately, however, on Mac OSX BoundedSemaphore() will not raise ValueError if it exceeds its initial value.

  • Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs.

  • The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias.

Changes in 0.51

  • In 0.50 processing.Value() and processing.sharedctypes.Value() were related but had different signatures, which was rather confusing.

    Now processing.sharedctypes.Value() has been renamed processing.sharedctypes.RawValue() and processing.sharedctypes.Value() is the same as processing.Value().

  • In version 0.50 sendfd() and recvfd() apparently did not work on 64bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40.

    However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of sendfd() and recvfd() by setting macros['HAVE_FD_TRANSFRER'] = 0 in setup.py.

  • Fixed an authentication error when using a "remote" manager created using BaseManager.from_address().

  • Fixed a couple of bugs which only affected Python 2.4.
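
A sketch of the Value()/RawValue() distinction described above, written with the current multiprocess names (RawValue is an unsynchronized shared ctypes object; Value wraps one with a lock):

    from multiprocess import Value
    from multiprocess.sharedctypes import RawValue

    raw = RawValue('i', 0)    # plain shared ctypes int, no lock
    raw.value += 1
    val = Value('i', 0)       # synchronized wrapper
    with val.get_lock():
        val.value += 1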

Changes in 0.50

  • ctypes is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately.

  • LocalManager() has been removed.

  • Added processing.Value() and processing.Array() which are similar to LocalManager.SharedValue() and LocalManager.SharedArray() (see the sketch at the end of this section).

  • In the sharedctypes module new_value() and new_array() have been renamed Value() and Array().

  • Process.stop(), Process.getStoppable() and Process.setStoppable() have been removed. Use Process.terminate() instead.

  • processing.Lock now matches threading.Lock behaviour more closely: a thread can now release a lock it does not own, and a thread that tries to acquire a lock it already owns now deadlocks instead of raising an exception.

  • On Windows when the main thread is blocking on a method of Lock, RLock, Semaphore, BoundedSemaphore, Condition it will no longer ignore Ctrl-C. (The same was already true on Unix.)

    This differs from the behaviour of the equivalent objects in threading which will completely ignore Ctrl-C.

  • The test sub-package has been replaced by lots of unit tests in a tests sub-package. Some of the old test files have been moved over to a new examples sub-package.

  • On Windows it is now possible for a non-console python program (i.e. one using pythonw.exe instead of python.exe) to use processing.

    Previously an exception was raised when subprocess.py tried to duplicate stdin, stdout, stderr.

  • Proxy objects should now be thread safe -- they now use thread local storage.

  • Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise RuntimeError with a message saying that the object should only be shared between processes using inheritance.

    Previously, this worked unreliably on Windows but would fail with an unexplained AssertionError on Unix.

  • The names of some of the macros used for compiling the extension have changed. See INSTALL.txt and setup.py.

  • A few changes which (hopefully) make compilation possible on Solaris.

  • Lots of refactoring of the code.

  • Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux).
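
A minimal sketch of the Value() and Array() functions added in this release, written with the current multiprocess names; the work() function is purely illustrative:

    from multiprocess import Process, Value, Array

    def work(n, a):
        n.value += 1             # shared int
        for i in range(len(a)):
            a[i] = -a[i]         # shared array of doubles

    if __name__ == '__main__':
        n = Value('i', 0)
        a = Array('d', [1.0, 2.0, 3.0])
        p = Process(target=work, args=(n, a))
        p.start()
        p.join()
        print(n.value, a[:])     # 1 [-1.0, -2.0, -3.0]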

Changes in 0.40

  • Removed SimpleQueue and PosixQueue types. Just use Queue instead.

  • Previously if you forgot to use the

    if __name__ == '__main__':
        freezeSupport()
        ...
    

    idiom on Windows, then processes could be created recursively, bringing the computer to its knees. Now RuntimeError will be raised instead.

  • Some refactoring of the code.

  • A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed.
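
A complete version of the idiom shown above, spelled with the current multiprocess API (freeze_support() is the modern name for freezeSupport(); the f() target is purely illustrative):

    from multiprocess import Process, freeze_support

    def f():
        print('hello from the child process')

    if __name__ == '__main__':
        freeze_support()     # only has an effect in frozen Windows executables
        p = Process(target=f)
        p.start()
        p.join()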

Changes in 0.39

  • One can now create one-way pipes by doing reader, writer = Pipe(duplex=False) (see the sketch at the end of this section).

  • Rewrote code for managing shared memory maps.

  • Added a sharedctypes module for creating ctypes objects allocated from shared memory. On Python 2.4 this requires the installation of ctypes.

    ctypes objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a LocalManager.

  • Rearranged documentation.

  • Previously the C extension caused a segfault on 64 bit machines with Python 2.5 because it used int instead of Py_ssize_t in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report.

  • A fix for Pool.terminate().

  • A fix for cleanup behaviour of Queue.
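
A sketch of the one-way pipe described above, in the current multiprocess spelling (the produce() function is purely illustrative):

    from multiprocess import Process, Pipe

    def produce(conn):
        conn.send('payload')
        conn.close()

    if __name__ == '__main__':
        reader, writer = Pipe(duplex=False)   # reader can only recv(), writer can only send()
        p = Process(target=produce, args=(writer,))
        p.start()
        print(reader.recv())
        p.join()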

Changes in 0.38

  • Have revamped the queue types. Now the queue types are Queue, SimpleQueue and (on systems which support it) PosixQueue.

    Now Queue should behave just like Python's normal Queue.Queue class except that qsize(), task_done() and join() are not implemented. In particular, if no maximum size was specified when the queue was created then put() will always succeed without blocking.

    A SimpleQueue instance is really just a pipe protected by a couple of locks. It has get(), put() and empty() methods but does not support timeouts or non-blocking calls.

    BufferedPipeQueue() and PipeQueue() remain as deprecated aliases of Queue() but BufferedPosixQueue() has been removed. (Not sure if we really need to keep PosixQueue()...)

  • Previously the Pool.shutdown() method was a little dodgy -- it could block indefinitely if map() or imap*() were used and did not try to terminate workers while they were doing a task.

    Now there are three new methods close(), terminate() and join() -- shutdown() is retained as a deprecated alias of terminate(). Thanks to Gerald John M. Manipon for feature request/suggested patch to shutdown().

  • Pool.imap() and Pool.imap_unordered() have gained a chunksize argument which allows the iterable to be submitted to the pool in chunks (see the sketch at the end of this section). Choosing chunksize appropriately makes Pool.imap() almost as fast as Pool.map() even for long iterables and cheap functions.

  • Previously on Windows, when the cleanup code for a LocalManager attempted to unlink the name of the file which backs the shared memory map, an exception was raised if a child process still existed with a handle open for that mmap. This was likely to happen if a daemon process inherited a LocalManager instance.

    Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin.

  • types.MethodType is registered with copy_reg so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.)

    This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object.

  • On Windows reduction.fromfd() now returns true instances of _socket.socket, so there is no more need for the _processing.falsesocket type.
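
A sketch of the chunksize argument mentioned above, using the current multiprocess API; square() is just an illustrative task:

    from multiprocess import Pool

    def square(x):
        return x * x

    if __name__ == '__main__':
        with Pool(4) as pool:
            # the iterable is submitted to the workers in chunks of 3 tasks
            for result in pool.imap(square, range(10), chunksize=3):
                print(result)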

Changes in 0.37

  • Updated metadata and documentation because the project is now hosted at developer.berlios.de/projects/pyprocessing.
  • The Pool.join() method has been removed. Pool.shutdown() will now join the worker processes automatically.
  • A pool object no longer participates in a reference cycle so Pool.shutdown() should get called as soon as its reference count falls to zero.
  • On Windows if enableLogging() was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, specifying a handler type in enableLogging() will now cause any previous handlers used by the logger to be discarded.

Changes in 0.36

  • In recent versions on Unix the finalizers in a manager process were never given a chance to run before os._exit() was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.

  • The shutting down of managers has been cleaned up.

  • In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed.

  • In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed.

  • Previously the arguments to processing.Semaphore() and processing.BoundedSemaphore() did not have any defaults. The defaults should be 1 to match threading. Fixed.

  • It should now be possible for a Windows Service created by using pywin32 to spawn processes using the processing package.

    Note that pywin32 apparently has a bug meaning that Py_Finalize() is never called when the service exits so functions registered with atexit never get a chance to run. Therefore it is advisable to explicitly call sys.exitfunc() or atexit._run_exitfuncs() at the end of ServiceFramework.DoSvcRun(). Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report.

  • Added getLogger() and enableLogging() to support logging.
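
In the current multiprocess API the equivalent helpers are get_logger() and log_to_stderr(); a minimal sketch:

    import logging
    from multiprocess import get_logger, log_to_stderr

    log_to_stderr()              # attach a stderr handler to the package logger
    logger = get_logger()
    logger.setLevel(logging.INFO)
    logger.info('logging from the parent process')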

Changes in 0.35

  • By default processes are no longer stoppable using the stop() method: one must call setStoppable(True) before start() in order to use the stop() method. (Note that terminate() will work regardless of whether the process is marked as being "stoppable".)

    The reason for this is that on Windows getting stop() to work involves starting a new console for the child process and installing a signal handler for the SIGBREAK signal. This unfortunately means that Ctrl-Break cannot be used to kill all processes of the program.

  • Added setStoppable() and getStoppable() methods -- see above.

  • Added BufferedQueue/BufferedPipeQueue/BufferedPosixQueue. Putting an object on a buffered queue will always succeed without blocking (just like with Queue.Queue if no maximum size is specified). This makes them potentially safer than the normal queue types provided by processing which have finite capacity and may cause deadlocks if they fill.

    test/test_worker.py has been updated to use BufferedQueue for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock.

  • Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of sem_timedwait(). This means that Condition.wait() and Queue.get() should now work with timeouts on Mac OS X.

  • Added a callback argument to Pool.apply_async() (see the sketch at the end of this section).

  • Added test/test_httpserverpool.py which runs a pool of http servers which share a single listening socket.

  • Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket.

  • Fixed bug in the iterator returned by Pool.imap().

  • Fixed bug in Condition.__repr__().

  • Fixed a handle/file descriptor leak when sockets or connections are unpickled.
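
A sketch of the apply_async() callback mentioned above, in the current multiprocess spelling; cube() is just an illustrative task:

    from multiprocess import Pool

    def cube(x):
        return x ** 3

    if __name__ == '__main__':
        results = []
        with Pool(2) as pool:
            r = pool.apply_async(cube, (3,), callback=results.append)
            r.wait()             # the callback runs in the parent once the result arrives
        print(results)           # [27]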

Changes in 0.34

  • Although in version 0.33 the C extension would compile on Mac OSX, trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the ImportError exception was silently swallowed.

    This is now fixed by using the NO_SEM_TIMED macro. Unfortunately this means that some methods like Condition.wait() and Queue.get() will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing.

  • Added a terminate() method to process objects which is more forceful than stop().

  • Fixed bug in the cleanup function registered with atexit which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. Thanks to Dominique Wahli for report and testing.

  • Added test/test_workers.py which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another.

  • Added processing.Pool() which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the map() builtin. This is still experimental and undocumented --- see test/test_pool.py for example usage.

Changes in 0.33

  • Added a recvbytes_into() method for receiving byte data into objects with the writable buffer interface. Also renamed the _recv_string() and _send_string() methods of connection objects to recvbytes() and sendbytes() (see the sketch at the end of this section).

  • Some optimizations for the transferring of large blocks of data using connection objects.

  • On Unix os.sysconf() is now used by default to determine whether to compile in support for posix semaphores or posix message queues.

    By using the NO_SEM_TIMED and NO_MQ_TIMED macros (see INSTALL.txt) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions sem_timedwait() or mq_timedreceive() and mq_timesend().

    gettimeofday() is now used instead of clock_gettime(), making compilation of the C extension (hopefully) possible on Mac OSX. No modification of setup.py should be necessary. Thanks to Michele Bertoldi for report and proposed patch.

  • cpuCount() function added which returns the number of CPUs in the system.

  • Bugfixes to PosixQueue class.
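
The byte-oriented connection methods mentioned above survive in the current multiprocess API as send_bytes(), recv_bytes() and recv_bytes_into(); a minimal sketch:

    from multiprocess import Pipe

    a, b = Pipe()
    a.send_bytes(b'abcdef')
    buf = bytearray(6)
    n = b.recv_bytes_into(buf)   # fills the writable buffer in place
    print(n, bytes(buf))         # 6 b'abcdef'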

Changes in 0.32

  • Refactored and simplified _nonforking module -- info about sys.modules of parent process is no longer passed on to child process. Also pkgutil is no longer used.
  • Allocated space from an mmap used by LocalManager will now be recycled.
  • Better tests for LocalManager.
  • Fixed bug in managers.py concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report.
  • Added a freezeSupport() function. Calling this at the appropriate point in the main module is necessary when freezing a multi-process program to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

Changes in 0.31

  • Fixed one line bug in localmanager.py which caused shared memory maps not to be resized properly.
  • Added tests for shared values/structs/arrays to test/test_processing.

Changes in 0.30

  • Process objects now support the complete API of thread objects.

    In particular isAlive(), isDaemon() and setDaemon() have been added, and join() now supports the timeout parameter (see the sketch at the end of this section).

    There are also new methods stop(), getPid() and getExitCode().

  • Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores.

  • Added support for sharing simple objects between processes by using a shared memory map and the struct or array modules.

  • An activeChildren() function has been added to processing which returns a list of the child processes which are still alive.

  • A Pipe() function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent.

  • socket objects etc are now picklable and can be transferred between processes. (Requires compilation of the _processing extension.)

  • Subclasses of managers.BaseManager no longer automatically spawn a child process when an instance is created: the start() method must be called explicitly.

  • On Windows child processes are now spawned using subprocess.

  • On Windows the Python 2.5 version of pkgutil is now used for loading modules by the _nonforking module. On Python 2.4 this version of pkgutil (which uses the standard Python licence) is included in processing.compat.

  • The arguments to the functions in processing.connection have changed slightly.

  • Connection objects now have a poll() method which tests whether there is any data available for reading.

  • The test/py2exedemo folder shows how to get py2exe to create a Windows executable from a program using the processing package.

  • More tests.

  • Bugfixes.

  • Rearrangement of various stuff.

Changes in 0.21

  • By default a proxy is now only able to access those methods of its referent which have been explicitly exposed.
  • The connection sub-package now supports digest authentication.
  • Process objects are now given randomly generated 'inheritable' authentication keys.
  • A manager process will now only accept connections from processes using the same authentication key.
  • Previously get_module() from _nonforking.py was seriously messed up (though it generally worked). It is a lot saner now.
  • Python 2.4 or higher is now required.

Changes in 0.20

  • The doc folder contains HTML documentation.
  • test is now a subpackage. Running processing.test.main() will run test scripts using both processes and threads.
  • nonforking.py has been renamed _nonforking.py. manager.py has been renamed managers.py. connection.py has become a sub-package connection.
  • Listener and Client have been removed from processing, but still exist in processing.connection.
  • The package is now probably compatible with versions of Python earlier than 2.4.
  • set is no longer a type supported by the default manager type.
  • Many more changes.

Changes in 0.12

  • Fixed bug where the arguments to processing.Manager() were passed on to processing.manager.DefaultManager() in the wrong order.
  • processing.dummy is now a subpackage of processing instead of a module.
  • Rearranged package so that the test folder, README.txt and CHANGES.txt are copied when the package is installed.

Changes in 0.11

  • Fixed bug on windows when the full path of nonforking.py contains a space.
  • On unix there is no longer a need to make the arguments to the constructor of Process be picklable or for an instance of a subclass of Process to be picklable when you call the start method.
  • On unix proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to Process. (This will never be possible on windows.)
uqfoundation-multiprocess-b3457a5/py3.13/doc/COPYING.html000066400000000000000000000040211455552142400230150ustar00rootroot00000000000000

Copyright (c) 2006-2008, R Oudkerk

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

uqfoundation-multiprocess-b3457a5/py3.13/doc/INSTALL.html000066400000000000000000000063531455552142400230250ustar00rootroot00000000000000 Installation of processing

Installation of processing

Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the ctypes package (which comes automatically with Python 2.5).

Windows binary builds for Python 2.4 and Python 2.5 are available at

http://pyprocessing.berlios.de

or

http://pypi.python.org/pypi/processing

Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:

python setup.py install

It should not be necessary to do any editing of setup.py if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the macros dictionary or libraries list. The section to modify reads

else:
    macros = dict(
        HAVE_SEM_OPEN=1,
        HAVE_SEM_TIMEDWAIT=1,
        HAVE_FD_TRANSFER=1
        )
    libraries = ['rt']

More details can be found in the comments in setup.py.

Note that if you use HAVE_SEM_OPEN=0 then support for posix semaphores will not be compiled in, and then many of the functions in the processing namespace like Lock() or Queue() will not be available. However, one can still create a manager using manager = processing.Manager() and then do lock = manager.Lock() etc.
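
As a rough sketch of that fallback (assuming a build where HAVE_SEM_OPEN=0 was used, so only the manager-based primitives are available):

import processing

if __name__ == '__main__':
    manager = processing.Manager()    # primitives live in the manager process
    lock = manager.Lock()             # proxy for a lock held by the manager
    queue = manager.Queue()           # proxy for a Queue.Queue in the manager

    lock.acquire()
    try:
        queue.put('no posix semaphores needed')
    finally:
        lock.release()
    print queue.get()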

Running tests

To run the test scripts using Python 2.5 do

python -m processing.tests

and on Python 2.4 do

python -c "from processing.tests import main; main()"

This will run a number of test scripts using both processes and threads.

uqfoundation-multiprocess-b3457a5/py3.13/doc/THANKS.html000066400000000000000000000017751455552142400227120ustar00rootroot00000000000000 Thanks

Thanks

Thanks to everyone who has offered bug reports, patches, suggestions:

Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.

Sorry if I have forgotten anyone.

uqfoundation-multiprocess-b3457a5/py3.13/doc/__init__.py000066400000000000000000000004001455552142400231250ustar00rootroot00000000000000import os import webbrowser def main(): ''' Show html documentation using webbrowser ''' index_html = os.path.join(os.path.dirname(__file__), 'index.html') webbrowser.open(index_html) if __name__ == '__main__': main() uqfoundation-multiprocess-b3457a5/py3.13/doc/connection-objects.html000066400000000000000000000152041455552142400255000ustar00rootroot00000000000000 Connection objects
Prev         Up         Next

Connection objects

Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.

Connection objects are usually created using processing.Pipe() -- see also Listeners and Clients.

Connection objects have the following methods:

send(obj)

Send an object to the other end of the connection which should be read using recv().

The object must be picklable.

recv()
Return an object sent from the other end of the connection using send(). Raises EOFError if there is nothing left to receive and the other end was closed.
fileno()
Returns the file descriptor or handle used by the connection.
close()

Close the connection.

This is called automatically when the connection is garbage collected.

poll(timeout=0.0)

Return whether there is any data available to be read within timeout seconds.

If timeout is None then an infinite timeout is used.

Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C.

sendBytes(buffer)

Send byte data from an object supporting the buffer interface as a complete message.

Can be used to send strings or a view returned by buffer().

recvBytes()
Return a complete message of byte data sent from the other end of the connection as a string. Raises EOFError if there is nothing left to receive and the other end was closed.
recvBytesInto(buffer, offset=0)

Read into buffer at position offset a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises EOFError if there is nothing left to receive and the other end was closed.

buffer must be an object satisfying the writable buffer interface and offset must be non-negative and less than the length of buffer (in bytes).

If the buffer is too short then a BufferTooShort exception is raised and the complete message is available as e.args[0] where e is the exception instance.

For example:

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.sendBytes('thank you')
>>> a.recvBytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.sendBytes(arr1)
>>> count = b.recvBytesInto(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
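
The poll() method and the BufferTooShort exception can be exercised in a similar way. The following is a hedged sketch: BufferTooShort is imported from processing.connection (as described under Listeners and Clients), and the message and buffer sizes are illustrative only.

>>> from processing import Pipe
>>> from processing.connection import BufferTooShort
>>> a, b = Pipe()
>>> a.poll()                       # nothing has been sent yet
False
>>> b.send('ping')
>>> a.poll(1.0)                    # data arrives within the timeout
True
>>> a.recv()
'ping'
>>> import array
>>> arr = array.array('i', range(5))
>>> b.sendBytes(arr)
>>> small = array.array('i', [0])  # deliberately too small a buffer
>>> try:
...     a.recvBytesInto(small)
... except BufferTooShort, e:
...     msg = e.args[0]            # the complete message is still available
...
>>> len(msg) == len(arr) * arr.itemsize
True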

Warning

The recv() method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message.

Therefore, unless the connection object was produced using Pipe() you should only use the recv() and send() methods after performing some sort of authentication. See Authentication keys.

Warning

If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie.

uqfoundation-multiprocess-b3457a5/py3.13/doc/connection-objects.txt000066400000000000000000000072761455552142400253650ustar00rootroot00000000000000.. include:: header.txt ==================== Connection objects ==================== Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects usually created using `processing.Pipe()` -- see also `Listener and Clients `_. Connection objects have the following methods: `send(obj)` Send an object to the other end of the connection which should be read using `recv()`. The object must be picklable. `recv()` Return an object sent from the other end of the connection using `send()`. Raises `EOFError` if there is nothing left to receive and the other end was closed. `fileno()` Returns the file descriptor or handle used by the connection. `close()` Close the connection. This is called automatically when the connection is garbage collected. `poll(timeout=0.0)` Return whether there is any data available to be read within `timeout` seconds. If `timeout` is `None` then an infinite timeout is used. Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C. `sendBytes(buffer)` Send byte data from an object supporting the buffer interface as a complete message. Can be used to send strings or a view returned by `buffer()`. `recvBytes()` Return a complete message of byte data sent from the other end of the connection as a string. Raises `EOFError` if there is nothing left to receive and the other end was closed. `recvBytesInto(buffer, offset=0)` Read into `buffer` at position `offset` a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises `EOFError` if there is nothing left to receive and the other end was closed. `buffer` must be an object satisfying the writable buffer interface and `offset` must be non-negative and less than the length of `buffer` (in bytes). If the buffer is too short then a `BufferTooShort` exception is raised and the complete message is available as `e.args[0]` where `e` is the exception instance. For example: >>> from processing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.sendBytes('thank you') >>> a.recvBytes() 'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.sendBytes(arr1) >>> count = b.recvBytesInto(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The `recv()` method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using `Pipe()` you should only use the `recv()` and `send()` methods after performing some sort of authentication. See `Authentication keys `_. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie. .. _Prev: queue-objects.html .. _Up: processing-ref.html .. _Next: manager-objects.html uqfoundation-multiprocess-b3457a5/py3.13/doc/connection-ref.html000066400000000000000000000357371455552142400246400ustar00rootroot00000000000000 Listeners and Clients
Prev         Up         Next

Listeners and Clients

Usually message passing between processes is done using queues or by using connection objects returned by Pipe().

However, the processing.connection module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for digest authentication using the hmac module from the standard library.

Classes and functions

The module defines the following functions:

Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)
Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections.
Client(address, family=None, authenticate=False, authkey=None)

Attempts to set up a connection to the listener which is using address address, returning a connection object.

The type of the connection is determined by the family argument, but this can generally be omitted since it can usually be inferred from the format of address.

If authenticate is true or authkey is a string then digest authentication is used. The key used for authentication will be either authkey, or currentProcess().getAuthKey() if authkey is None. If authentication fails then AuthenticationError is raised. See Authentication keys.

The module exports two exception types:

exception AuthenticationError
Exception raised when there is an authentication error.
exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Listener objects

Instances of Listener have the following methods:

__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)
address
The address to be used by the bound socket or named pipe of the listener object.
family

The type of the socket (or named pipe) to use.

This can be one of the strings 'AF_INET' (for a TCP socket), 'AF_UNIX' (for a Unix domain socket) or 'AF_PIPE' (for a Windows named pipe). Of these only the first is guaranteed to be available.

If family is None then the family is inferred from the format of address. If address is also None then a default is chosen. This default is the family which is assumed to be the fastest available. See Address formats.

Note that if family is 'AF_UNIX' then the associated file will only be readable/writable by the user running the current process -- use os.chmod() if you need to let other users access the socket.

backlog
If the listener object uses a socket then backlog is passed to the listen() method of the socket once it has been bound.
authenticate
If authenticate is true or authkey is not None then digest authentication is used.
authkey

If authkey is a string then it will be used as the authentication key; otherwise it must be None.

If authkey is None and authenticate is true then currentProcess().getAuthKey() is used as the authentication key.

If authkey is None and authenticate is false then no authentication is done.

If authentication fails then AuthenticationError is raised. See Authentication keys.

accept()

Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then AuthenticationError is raised.

Returns a connection object.

close()

Close the bound socket or named pipe of the listener object.

This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.

Listener objects have the following read-only properties:

address
The address which is being used by the listener object.
last_accepted

The address from which the last accepted connection came.

If this is unavailable then None is returned.

Address formats

  • An 'AF_INET' address is a tuple of the form (hostname, port) where hostname is a string and port is an integer

  • An 'AF_UNIX' address is a string representing a filename on the filesystem.

  • An 'AF_PIPE' address is a string of the form r'\\.\pipe\PipeName'.

    To use Client to connect to a named pipe on a remote computer called ServerName one should use an address of the form r'\\ServerName\pipe\PipeName' instead.

Note that any string beginning with two backslashes is assumed by default to be an 'AF_PIPE' address rather than an 'AF_UNIX' address.
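
A small sketch of these address formats follows; the socket path and pipe name below are purely illustrative.

from processing.connection import Listener, Client

inet_address = ('localhost', 6000)             # 'AF_INET': a (hostname, port) tuple
unix_address = '/tmp/processing-demo.sock'     # 'AF_UNIX': a filename (Unix only)
pipe_address = r'\\.\pipe\ProcessingDemo'      # 'AF_PIPE': note the leading backslashes

listener = Listener(inet_address)              # family inferred from the address format
conn = Client(inet_address)
server_end = listener.accept()
server_end.send('hi')
print conn.recv()                              # prints 'hi'

conn.close()
server_end.close()
listener.close()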

Authentication keys

When one uses the recv() method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore Listener and Client use the hmac module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does not involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the return value of currentProcess().getAuthKey() is used (see Process objects). This value will automatically be inherited by any Process object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves.

Suitable authentication keys can also be generated by using os.urandom().

Example

The following server code creates a listener which uses 'secret password' as an authentication key. It then waits for a connection and sends some data to the client:

from processing.connection import Listener
from array import array

address = ('localhost', 6000)     # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')

conn = listener.accept()
print 'connection accepted from', listener.last_accepted

conn.send([2.25, None, 'junk', float])

conn.sendBytes('hello')

conn.sendBytes(array('i', [42, 1729]))

conn.close()
listener.close()

The following code connects to the server and receives some data from the server:

from processing.connection import Client
from array import array

address = ('localhost', 6000)
conn = Client(address, authkey='secret password')

print conn.recv()                 # => [2.25, None, 'junk', float]

print conn.recvBytes()            # => 'hello'

arr = array('i', [0, 0, 0, 0, 0])
print conn.recvBytesInto(arr)    # => 8
print arr                         # => array('i', [42, 1729, 0, 0, 0])

conn.close()
uqfoundation-multiprocess-b3457a5/py3.13/doc/connection-ref.txt000066400000000000000000000210001455552142400244650ustar00rootroot00000000000000.. include:: header.txt ======================= Listeners and Clients ======================= Usually message passing between processes is done using queues or by using connection objects returned by `Pipe()`. However, the `processing.connection` module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for *digest authentication* using the `hmac` module from the standard library. Classes and functions ===================== The module defines the following functions: `Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)` Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections. `Client(address, family=None, authenticate=False, authkey=None)` Attempts to set up a connection to the listener which is using address `address`, returning a `connection object `_. The type of the connection is determined by `family` argument, but this can generally be omitted since it can usually be inferred from the format of `address`. If `authentication` or `authkey` is a string then digest authentication is used. The key used for authentication will be either `authkey` or `currentProcess.getAuthKey()` if `authkey` is `None`. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. .. `deliverChallenge(connection, authkey)` Sends a randomly generated message to the other end of the connection and waits for a reply. If the reply matches the digest of the message using `authkey` as the key then a welcome message is sent to the other end of the connection. Otherwise `AuthenticationError` is raised. `answerChallenge(connection, authkey)` Receives a message, calculates the digest of the message using `authkey` as the key, and then sends the digest back. If a welcome message is not received then `AuthenticationError` is raised. The module exports two exception types: **exception** `AuthenticationError` Exception raised when there is an authentication error. **exception** `BufferTooShort` Exception raise by the `recvBytesInto()` method of a connection object when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Listener objects ================ Instances of `Listener` have the following methods: `__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)` `address` The address to be used by the bound socket or named pipe of the listener object. `family` The type of the socket (or named pipe) to use. This can be one of the strings `'AF_INET'` (for a TCP socket), `'AF_UNIX'` (for a Unix domain socket) or `'AF_PIPE'` (for a Windows named pipe). Of these only the first is guaranteed to be available. If `family` is `None` than the family is inferred from the format of `address`. If `address` is also `None` then a default is chosen. This default is the family which is assumed to be the fastest available. See `Address formats`_. Note that if `family` is `'AF_UNIX'` then the associated file will have only be readable/writable by the user running the current process -- use `os.chmod()` is you need to let other users access the socket. `backlog` If the listener object uses a socket then `backlog` is passed to the `listen()` method of the socket once it has been bound. 
`authenticate` If `authenticate` is true or `authkey` is not `None` then digest authentication is used. `authkey` If `authkey` is a string then it will be used as the authentication key; otherwise it must be `None`. If `authkey` is `None` and `authenticate` is true then `currentProcess.getAuthKey()` is used as the authentication key. If `authkey` is `None` and `authentication` is false then no authentication is done. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. `accept()` Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then `AuthenticationError` is raised. Returns a `connection object ` object. `close()` Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: `address` The address which is being used by the listener object. `last_accepted` The address from which the last accepted connection came. If this is unavailable then `None` is returned. Address formats =============== * An `'AF_INET'` address is a tuple of the form `(hostname, port)` where `hostname` is a string and `port` is an integer * An `'AF_UNIX'` address is a string representing a filename on the filesystem. * An `'AF_PIPE'` address is a string of the form `r'\\\\.\\pipe\\PipeName'`. To use `Client` to connect to a named pipe on a remote computer called `ServerName` one should use an address of the form `r'\\\\ServerName\\pipe\\PipeName'` instead. Note that any string beginning with two backslashes is assumed by default to be an `'AF_PIPE'` address rather than an `'AF_UNIX'` address. Authentication keys =================== When one uses the `recv()` method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore `Listener` and `Client` use the `hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does *not* involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of `currentProcess().getAuthKey()` is used (see `Process objects `_). This value will automatically inherited by any `Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between the themselves. Suitable authentication keys can also be generated by using `os.urandom()`. Example ======= The following server code creates a listener which uses `'secret password'` as an authentication key. 
It then waits for a connection and sends some data to the client:: from processing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey='secret password') conn = listener.accept() print 'connection accepted from', listener.last_accepted conn.send([2.25, None, 'junk', float]) conn.sendBytes('hello') conn.sendBytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from processing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey='secret password') print conn.recv() # => [2.25, None, 'junk', float] print conn.recvBytes() # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print conn.recvBytesInto(arr) # => 8 print arr # => array('i', [42, 1729, 0, 0, 0]) conn.close() .. _Prev: sharedctypes.html .. _Up: processing-ref.html .. _Next: programming-guidelines.html uqfoundation-multiprocess-b3457a5/py3.13/doc/header.txt000066400000000000000000000003401455552142400230100ustar00rootroot00000000000000.. default-role:: literal .. header:: Prev_ |spaces| Up_ |spaces| Next_ .. footer:: Prev_ |spaces| Up_ |spaces| Next_ .. |nbsp| unicode:: U+000A0 .. |spaces| replace:: |nbsp| |nbsp| |nbsp| |nbsp| uqfoundation-multiprocess-b3457a5/py3.13/doc/html4css1.css000066400000000000000000000126361455552142400233660ustar00rootroot00000000000000/* :Author: David Goodger :Contact: goodger@users.sourceforge.net :Date: $Date: 2008/01/29 22:14:02 $ :Revision: $Revision: 1.1.1.1 $ :Copyright: This stylesheet has been placed in the public domain. Default cascading style sheet for the HTML output of Docutils. See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to customize this style sheet. */ /* used to remove borders from tables and images */ .borderless, table.borderless td, table.borderless th { border: 0 } table.borderless td, table.borderless th { /* Override padding for "table.docutils td" with "! important". The right padding separates the table cells. */ padding: 0 0.5em 0 0 ! important } .first { /* Override more specific margin styles with "! important". */ margin-top: 0 ! important } .last, .with-subtitle { margin-bottom: 0 ! important } .hidden { display: none } a.toc-backref { text-decoration: none ; color: black } blockquote.epigraph { margin: 2em 5em ; } dl.docutils dd { margin-bottom: 0.5em } /* Uncomment (and remove this text!) to get bold-faced definition list terms dl.docutils dt { font-weight: bold } */ div.abstract { margin: 2em 5em } div.abstract p.topic-title { font-weight: bold ; text-align: center } div.admonition, div.attention, div.caution, div.danger, div.error, div.hint, div.important, div.note, div.tip, div.warning { margin: 2em ; border: medium outset ; padding: 1em } div.admonition p.admonition-title, div.hint p.admonition-title, div.important p.admonition-title, div.note p.admonition-title, div.tip p.admonition-title { font-weight: bold ; font-family: sans-serif } div.attention p.admonition-title, div.caution p.admonition-title, div.danger p.admonition-title, div.error p.admonition-title, div.warning p.admonition-title { color: red ; font-weight: bold ; font-family: sans-serif } /* Uncomment (and remove this text!) to get reduced vertical space in compound paragraphs. 
div.compound .compound-first, div.compound .compound-middle { margin-bottom: 0.5em } div.compound .compound-last, div.compound .compound-middle { margin-top: 0.5em } */ div.dedication { margin: 2em 5em ; text-align: center ; font-style: italic } div.dedication p.topic-title { font-weight: bold ; font-style: normal } div.figure { margin-left: 2em ; margin-right: 2em } div.footer, div.header { clear: both; font-size: smaller } div.line-block { display: block ; margin-top: 1em ; margin-bottom: 1em } div.line-block div.line-block { margin-top: 0 ; margin-bottom: 0 ; margin-left: 1.5em } div.sidebar { margin-left: 1em ; border: medium outset ; padding: 1em ; background-color: #ffffee ; width: 40% ; float: right ; clear: right } div.sidebar p.rubric { font-family: sans-serif ; font-size: medium } div.system-messages { margin: 5em } div.system-messages h1 { color: red } div.system-message { border: medium outset ; padding: 1em } div.system-message p.system-message-title { color: red ; font-weight: bold } div.topic { margin: 2em } h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { margin-top: 0.4em } h1.title { text-align: center } h2.subtitle { text-align: center } hr.docutils { width: 75% } img.align-left { clear: left } img.align-right { clear: right } ol.simple, ul.simple { margin-bottom: 1em } ol.arabic { list-style: decimal } ol.loweralpha { list-style: lower-alpha } ol.upperalpha { list-style: upper-alpha } ol.lowerroman { list-style: lower-roman } ol.upperroman { list-style: upper-roman } p.attribution { text-align: right ; margin-left: 50% } p.caption { font-style: italic } p.credits { font-style: italic ; font-size: smaller } p.label { white-space: nowrap } p.rubric { font-weight: bold ; font-size: larger ; color: maroon ; text-align: center } p.sidebar-title { font-family: sans-serif ; font-weight: bold ; font-size: larger } p.sidebar-subtitle { font-family: sans-serif ; font-weight: bold } p.topic-title { font-weight: bold } pre.address { margin-bottom: 0 ; margin-top: 0 ; font-family: serif ; font-size: 100% } pre.literal-block, pre.doctest-block { margin-left: 2em ; margin-right: 2em ; background-color: #eeeeee } span.classifier { font-family: sans-serif ; font-style: oblique } span.classifier-delimiter { font-family: sans-serif ; font-weight: bold } span.interpreted { font-family: sans-serif } span.option { white-space: nowrap } span.pre { white-space: pre } span.problematic { color: red } span.section-subtitle { /* font-size relative to parent (h1..h6 element) */ font-size: 80% } table.citation { border-left: solid 1px gray; margin-left: 1px } table.docinfo { margin: 2em 4em } table.docutils { margin-top: 0.5em ; margin-bottom: 0.5em } table.footnote { border-left: solid 1px black; margin-left: 1px } table.docutils td, table.docutils th, table.docinfo td, table.docinfo th { padding-left: 0.5em ; padding-right: 0.5em ; vertical-align: top } table.docutils th.field-name, table.docinfo th.docinfo-name { font-weight: bold ; text-align: left ; white-space: nowrap ; padding-left: 0 } h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { font-size: 100% } /* tt.docutils { background-color: #eeeeee } */ ul.auto-toc { list-style-type: none } uqfoundation-multiprocess-b3457a5/py3.13/doc/index.html000066400000000000000000000064761455552142400230340ustar00rootroot00000000000000 Documentation for processing-0.52
Prev         Up         Next
uqfoundation-multiprocess-b3457a5/py3.13/doc/index.txt000066400000000000000000000021751455552142400226770ustar00rootroot00000000000000.. include:: header.txt .. include:: version.txt ======================================== Documentation for processing-|version| ======================================== :Author: R Oudkerk :Contact: roudkerk at users.berlios.de :Url: http://developer.berlios.de/projects/pyprocessing :Licence: BSD Licence Contents ======== * `Introduction `_ * `Package reference `_ + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes objects `_ + `Listeners and Clients `_ * `Programming guidelines `_ * `Tests and examples `_ See also ======== * `Installation instructions `_ * `Changelog `_ * `Acknowledgments `_ * `Licence `_ .. _Next: intro.html .. _Up: index.html .. _Prev: index.html uqfoundation-multiprocess-b3457a5/py3.13/doc/intro.html000066400000000000000000000427461455552142400230600ustar00rootroot00000000000000 Introduction
Prev         Up         Next

Introduction

Threads, processes and the GIL

To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads.

Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient.

On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other.

CPython has a Global Interpreter Lock (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C.

One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead.

Python gives little direct support for writing programs using multiple processes. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs.

Forking and spawning

There are two ways of creating a new process in Python:

  • The current process can fork a new child process by using the os.fork() function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits copies of all variables that the parent process had.

    However, os.fork() is not available on every platform: in particular Windows does not support it.

  • Alternatively, the current process can spawn a completely new Python interpreter by using the subprocess module or one of the os.spawn*() functions.

    Getting this new interpreter into a fit state to perform the task set for it by its parent process is, however, a bit of a challenge.

The processing package uses os.fork() if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process.
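
For illustration only, the raw os.fork() behaviour described above (Unix only; the processing package normally hides this detail) looks roughly like this:

import os

counter = 10                       # the child inherits a *copy* of this

pid = os.fork()                    # not available on Windows
if pid == 0:                       # child process
    counter += 1                   # modifies the child's copy only
    print 'child sees', counter
    os._exit(0)
else:                              # parent process
    os.waitpid(pid, 0)
    print 'parent still sees', counter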

The Process class

In the processing package processes are spawned by creating a Process object and then calling its start() method. processing.Process follows the API of threading.Thread. A trivial example of a multiprocess program is

from processing import Process

def f(name):
    print 'hello', name

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

Here the function f is run in a child process.

For an explanation of why (on Windows) the if __name__ == '__main__' part is necessary see Programming guidelines.

Exchanging objects between processes

processing supports two types of communication channel between processes:

Queues:

The function Queue() returns a near clone of Queue.Queue -- see the Python standard documentation. For example

from processing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()

Queues are thread and process safe. See Queues.

Pipes:

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example

from processing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print parent_conn.recv()   # prints "[42, None, 'hello']"
    p.join()

The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See Pipes.

Synchronization between processes

processing contains equivalents of all the synchronization primitives from threading. For instance one can use a lock to ensure that only one process prints to standard output at a time:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()

    for num in range(10):
        Process(target=f, args=(lock, num)).start()

Without using the lock, output from the different processes is liable to get all mixed up.

Sharing state between processes

As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes.

However, if you really do need to use some shared data then processing provides a couple of ways of doing so.

Shared memory:

Data can be stored in a shared memory map using Value or Array. For example the following code

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

will print

3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

The 'd' and 'i' arguments used when creating num and arr are typecodes of the kind used by the array module: 'd' indicates a double precision float and 'i' indicates a signed integer. These shared objects will be process and thread safe.

For more flexibility in using shared memory one can use the processing.sharedctypes module which supports the creation of arbitrary ctypes objects allocated from shared memory.
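
For instance, here is a hedged sketch along the lines of the Value/Array example above. It assumes processing.sharedctypes provides Value and Array functions which accept ctypes types (as the later multiprocessing.sharedctypes module does); see Shared ctypes objects for the actual interface.

from processing import Process
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, a):
    n.value **= 2
    for point in a:
        point.x **= 2
        point.y **= 2

if __name__ == '__main__':
    n = Value(c_double, 1.5)                          # assumed synchronized by default
    a = Array(Point, [(1.875, -6.25), (-5.75, 2.0)])

    p = Process(target=modify, args=(n, a))
    p.start()
    p.join()

    print n.value
    print [(point.x, point.y) for point in a]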

Server process:

A manager object returned by Manager() controls a server process which holds python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue, Value and Array. For example:

from processing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    print d
    print l

will print

{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Creating managers which support other types is not hard --- see Customized managers.

Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See Server process managers.

Using a pool of workers

The Pool() function returns an object representing a pool of worker processes. It has methods which allow tasks to be offloaded to the worker processes in a few different ways.

For example:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes
    result = pool.applyAsync(f, [10])     # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow
    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

See Process pools.

Speed

The following benchmarks were performed on a single-core Pentium 4, 2.5GHz laptop running Windows XP and Ubuntu Linux 6.10 --- see benchmarks.py.

Number of 256 byte string objects passed between processes/threads per sec:

Connection type Windows Linux
Queue.Queue 49,000 17,000-50,000 [1]
processing.Queue 22,000 21,000
Queue managed by server 6,900 6,500
processing.Pipe 52,000 57,000
[1] For some reason the performance of Queue.Queue is very variable on Linux.

Number of acquires/releases of a lock per sec:

Lock type Windows Linux
threading.Lock 850,000 560,000
processing.Lock 420,000 510,000
Lock managed by server 10,000 8,400
threading.RLock 93,000 76,000
processing.RLock 420,000 500,000
RLock managed by server 8,800 7,400

Number of interleaved waits/notifies per sec on a condition variable by two processes:

Condition type Windows Linux
threading.Condition 27,000 31,000
processing.Condition 26,000 25,000
Condition managed by server 6,600 6,000

Number of integers retrieved from a sequence per sec:

Sequence type Windows Linux
list 6,400,000 5,100,000
unsynchronized shared array 3,900,000 3,100,000
synchronized shared array 200,000 220,000
list managed by server 20,000 17,000
uqfoundation-multiprocess-b3457a5/py3.13/doc/intro.txt000066400000000000000000000301551455552142400227220ustar00rootroot00000000000000.. include:: header.txt ============== Introduction ============== Threads, processes and the GIL ============================== To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads. Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient. On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other. CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C. One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead. Python gives little direct support for writing programs using multiple process. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs. Forking and spawning ==================== There are two ways of creating a new process in Python: * The current process can *fork* a new child process by using the `os.fork()` function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits *copies* of all variables that the parent process had. However, `os.fork()` is not available on every platform: in particular Windows does not support it. * Alternatively, the current process can spawn a completely new Python interpreter by using the `subprocess` module or one of the `os.spawn*()` functions. Getting this new interpreter in to a fit state to perform the task set for it by its parent process is, however, a bit of a challenge. The `processing` package uses `os.fork()` if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process. The Process class ================= In the `processing` package processes are spawned by creating a `Process` object and then calling its `start()` method. `processing.Process` follows the API of `threading.Thread`. A trivial example of a multiprocess program is :: from processing import Process def f(name): print 'hello', name if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() Here the function `f` is run in a child process. For an explanation of why (on Windows) the `if __name__ == '__main__'` part is necessary see `Programming guidelines `_. 
Exchanging objects between processes ==================================== `processing` supports two types of communication channel between processes: **Queues**: The function `Queue()` returns a near clone of `Queue.Queue` -- see the Python standard documentation. For example :: from processing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print q.get() # prints "[42, None, 'hello']" p.join() Queues are thread and process safe. See `Queues `_. **Pipes**: The `Pipe()` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example :: from processing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print parent_conn.recv() # prints "[42, None, 'hello']" p.join() The two connection objects returned by `Pipe()` represent the two ends of the pipe. Each connection object has `send()` and `recv()` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See `Pipes `_. Synchronization between processes ================================= `processing` contains equivalents of all the synchronization primitives from `threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from processing import Process, Lock def f(l, i): l.acquire() print 'hello world', i l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes =============================== As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then `processing` provides a couple of ways of doing so. **Shared memory**: Data can be stored in a shared memory map using `Value` or `Array`. For example the following code :: from processing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print num.value print arr[:] will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The `'d'` and `'i'` arguments used when creating `num` and `arr` are typecodes of the kind used by the `array` module: `'d'` indicates a double precision float and `'i'` inidicates a signed integer. These shared objects will be process and thread safe. For more flexibility in using shared memory one can use the `processing.sharedctypes` module which supports the creation of arbitrary `ctypes objects allocated from shared memory `_. **Server process**: A manager object returned by `Manager()` controls a server process which holds python objects and allows other processes to manipulate them using proxies. A manager returned by `Manager()` will support types `list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value` and `Array`. 
For example:: from processing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print d print l will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Creating managers which support other types is not hard --- see `Customized managers `_. Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See `Server process managers `_. Using a pool of workers ======================= The `Pool()` function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" See `Process pools `_. Speed ===== The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see `benchmarks.py <../examples/benchmarks.py>`_. *Number of 256 byte string objects passed between processes/threads per sec*: ================================== ========== ================== Connection type Windows Linux ================================== ========== ================== Queue.Queue 49,000 17,000-50,000 [1]_ processing.Queue 22,000 21,000 Queue managed by server 6,900 6,500 processing.Pipe 52,000 57,000 ================================== ========== ================== .. [1] For some reason the performance of `Queue.Queue` is very variable on Linux. *Number of acquires/releases of a lock per sec*: ============================== ========== ========== Lock type Windows Linux ============================== ========== ========== threading.Lock 850,000 560,000 processing.Lock 420,000 510,000 Lock managed by server 10,000 8,400 threading.RLock 93,000 76,000 processing.RLock 420,000 500,000 RLock managed by server 8,800 7,400 ============================== ========== ========== *Number of interleaved waits/notifies per sec on a condition variable by two processes*: ============================== ========== ========== Condition type Windows Linux ============================== ========== ========== threading.Condition 27,000 31,000 processing.Condition 26,000 25,000 Condition managed by server 6,600 6,000 ============================== ========== ========== *Number of integers retrieved from a sequence per sec*: ============================== ========== ========== Sequence type Windows Linux ============================== ========== ========== list 6,400,000 5,100,000 unsynchornized shared array 3,900,000 3,100,000 synchronized shared array 200,000 220,000 list managed by server 20,000 17,000 ============================== ========== ========== .. _Prev: index.html .. _Up: index.html .. _Next: processing-ref.html uqfoundation-multiprocess-b3457a5/py3.13/doc/manager-objects.html000066400000000000000000000440461455552142400247610ustar00rootroot00000000000000 Manager objects
Prev         Up         Next

Manager objects

A manager object controls a server process which manages shared objects. Other processes can access the shared objects by using proxies.

Manager processes will be shut down as soon as they are garbage collected or their parent process exits. The manager classes are defined in the processing.managers module.

BaseManager

BaseManager is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects.

The public methods of BaseManager are the following:

__init__(self, address=None, authkey=None)

Creates a manager object.

Once created one should call start() or serveForever() to ensure that the manager object refers to a started manager process.

The arguments to the constructor are as follows:

address

The address on which the manager process listens for new connections. If address is None then an arbitrary one is chosen.

See Listener objects.

authkey

The authentication key which will be used to check the validity of incoming connections to the server process.

If authkey is None then currentProcess().getAuthKey() is used. Otherwise authkey is used and it must be a string.

See Authentication keys.

start()
Spawn or fork a subprocess to start the manager.
serveForever()
Start the manager in the current process. See Using a remote manager.
fromAddress(address, authkey)
A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See Using a remote manager.
shutdown()

Stop the process used by the manager. This is only available if start() has been used to start the server process.

This can be called multiple times.

BaseManager instances also have one read-only property:

address
The address used by the manager.

The creation of managers which support arbitrary types is discussed below in Customized managers.
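
The start()/serveForever()/fromAddress() methods above allow the server process either to be spawned as a child or to run stand-alone so that other programs can connect to it. The following is a hedged sketch of the stand-alone case: the address, authentication key and queue-returning callable are invented for illustration, and CreatorMethod is described under Customized managers below.

# server.py -- runs the manager in the current process
from processing.managers import BaseManager, CreatorMethod
import Queue

queue = Queue.Queue()

class QueueManager(BaseManager):
    getQueue = CreatorMethod(callable=lambda: queue, typeid='getQueue')

if __name__ == '__main__':
    m = QueueManager(address=('', 50000), authkey='illustrative key')
    m.serveForever()

# client.py -- attaches to the pre-existing server process
from processing.managers import BaseManager, CreatorMethod

class QueueManager(BaseManager):
    getQueue = CreatorMethod(typeid='getQueue')

if __name__ == '__main__':
    m = QueueManager.fromAddress(address=('localhost', 50000),
                                 authkey='illustrative key')
    q = m.getQueue()
    q.put('hello over the connection')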

SyncManager

SyncManager is a subclass of BaseManager which can be used for the synchronization of processes. Objects of this type are returned by processing.Manager().

It also supports creation of shared lists and dictionaries. The instance methods defined by SyncManager are

BoundedSemaphore(value=1)
Creates a shared threading.BoundedSemaphore object and returns a proxy for it.
Condition(lock=None)

Creates a shared threading.Condition object and returns a proxy for it.

If lock is supplied then it should be a proxy for a threading.Lock or threading.RLock object.

Event()
Creates a shared threading.Event object and returns a proxy for it.
Lock()
Creates a shared threading.Lock object and returns a proxy for it.
Namespace()

Creates a shared Namespace object and returns a proxy for it.

See Namespace objects.

Queue(maxsize=0)
Creates a shared Queue.Queue object and returns a proxy for it.
RLock()
Creates a shared threading.RLock object and returns a proxy for it.
Semaphore(value=1)
Creates a shared threading.Semaphore object and returns a proxy for it.
Array(typecode, sequence)
Creates an array and returns a proxy for it. (The typecode argument is ignored.)
Value(typecode, value)
Creates an object with a writable value attribute and returns a proxy for it.
dict(), dict(mapping), dict(sequence)
Creates a shared dict object and returns a proxy for it.
list(), list(sequence)
Creates a shared list object and returns a proxy for it.
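
The following is an illustrative sketch (not part of the original documentation) showing several of these proxies being shared with worker processes; it assumes the processing API described above:

from processing import Process, Manager

def worker(d, lock):
    lock.acquire()                  # the Lock proxy behaves like processing.Lock
    try:
        d['count'] = d.get('count', 0) + 1
    finally:
        lock.release()

if __name__ == '__main__':
    manager = Manager()             # returns a started SyncManager
    d = manager.dict()
    lock = manager.Lock()
    workers = [Process(target=worker, args=(d, lock)) for i in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    print d                         # should print "{'count': 4}"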

Namespace objects

A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes.

However, when using a proxy for a namespace object, an attribute beginning with '_' will be an attribute of the proxy and not an attribute of the referent:

>>> manager = processing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3    # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')

Customized managers

To create one's own manager one creates a subclass of BaseManager.

To create a method of the subclass which will create new shared objects one uses the following function:

CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)

Returns a function with signature func(self, *args, **kwds) which will create a shared object using the manager self and return a proxy for it.

The shared objects will be created by evaluating callable(*args, **kwds) in the manager process.

The arguments are:

callable
The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored.
proxytype

The type of proxy which will be used for objects returned by callable.

If proxytype is None then each time an object is returned by callable either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the exposed argument, see below.

exposed

Given a shared object returned by callable, the exposed argument is the list of those method names which should be exposed via BaseProxy._callMethod(). [1] [2]

If exposed is None and callable.__exposed__ exists then callable.__exposed__ is used instead.

If exposed is None and callable.__exposed__ does not exist then all methods of the shared object which do not start with '_' will be exposed.

An attempt to use BaseProxy._callMethod() with a method name which is not exposed will raise an exception.

typeid
If typeid is a string then it is used as an identifier for the callable. Otherwise, typeid must be None and a string prefixed by callable.__name__ is used as the identifier.
[1]A method here means any attribute which has a __call__ attribute.
[2]

The method names __repr__, __str__, and __cmp__ of a shared object are always exposed by the manager. However, instead of invoking the __repr__(), __str__(), __cmp__() instance methods (none of which are guaranteed to exist) they invoke the builtin functions repr(), str() and cmp().

Note that one should generally avoid exposing rich comparison methods like __eq__(), __ne__(), __le__(). To make the proxy type support comparison by value one can just expose __cmp__() instead (even if the referent does not have such a method).

Example

from processing.managers import BaseManager, CreatorMethod

class FooClass(object):
    def bar(self):
        print 'BAR'
    def baz(self):
        print 'BAZ'

class NewManager(BaseManager):
    Foo = CreatorMethod(FooClass)

if __name__ == '__main__':
    manager = NewManager()
    manager.start()
    foo = manager.Foo()
    foo.bar()               # prints 'BAR'
    foo.baz()               # prints 'BAZ'
    manager.shutdown()

See ex_newtype.py for more examples.

Using a remote manager

It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it).

Running the following commands creates a server for a shared queue which remote clients can use:

>>> from processing.managers import BaseManager, CreatorMethod
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy')
...
>>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none')
>>> m.serveForever()

One client can access the server as follows:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.put('hello')

Another client can also use it:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.get()
'hello'
uqfoundation-multiprocess-b3457a5/py3.13/doc/manager-objects.txt000066400000000000000000000235161455552142400246330ustar00rootroot00000000000000.. include:: header.txt ================= Manager objects ================= A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the `processing.managers` module. BaseManager =========== `BaseManager` is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects. The public methods of `BaseManager` are the following: `__init__(self, address=None, authkey=None)` Creates a manager object. Once created one should call `start()` or `serveForever()` to ensure that the manager object refers to a started manager process. The arguments to the constructor are as follows: `address` The address on which the manager process listens for new connections. If `address` is `None` then an arbitrary one is chosen. See `Listener objects `_. `authkey` The authentication key which will be used to check the validity of incoming connections to the server process. If `authkey` is `None` then `currentProcess().getAuthKey()`. Otherwise `authkey` is used and it must be a string. See `Authentication keys `_. `start()` Spawn or fork a subprocess to start the manager. `serveForever()` Start the manager in the current process. See `Using a remote manager`_. `fromAddress(address, authkey)` A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See `Using a remote manager`_. `shutdown()` Stop the process used by the manager. This is only available if `start()` has been used to start the server process. This can be called multiple times. `BaseManager` instances also have one read-only property: `address` The address used by the manager. The creation of managers which support arbitrary types is discussed below in `Customized managers`_. SyncManager =========== `SyncManager` is a subclass of `BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by `processing.Manager()`. It also supports creation of shared lists and dictionaries. The instance methods defined by `SyncManager` are `BoundedSemaphore(value=1)` Creates a shared `threading.BoundedSemaphore` object and returns a proxy for it. `Condition(lock=None)` Creates a shared `threading.Condition` object and returns a proxy for it. If `lock` is supplied then it should be a proxy for a `threading.Lock` or `threading.RLock` object. `Event()` Creates a shared `threading.Event` object and returns a proxy for it. `Lock()` Creates a shared `threading.Lock` object and returns a proxy for it. `Namespace()` Creates a shared `Namespace` object and returns a proxy for it. See `Namespace objects`_. `Queue(maxsize=0)` Creates a shared `Queue.Queue` object and returns a proxy for it. `RLock()` Creates a shared `threading.RLock` object and returns a proxy for it. `Semaphore(value=1)` Creates a shared `threading.Semaphore` object and returns a proxy for it. `Array(typecode, sequence)` Create an array and returns a proxy for it. (`format` is ignored.) `Value(typecode, value)` Create an object with a writable `value` attribute and returns a proxy for it. 
`dict()`, `dict(mapping)`, `dict(sequence)` Creates a shared `dict` object and returns a proxy for it. `list()`, `list(sequence)` Creates a shared `list` object and returns a proxy for it. Namespace objects ----------------- A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with `'_'` will be an attribute of the proxy and not an attribute of the referent:: >>> manager = processing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print Global Namespace(x=10, y='hello') Customized managers =================== To create one's own manager one creates a subclass of `BaseManager`. To create a method of the subclass which will create new shared objects one uses the following function: `CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)` Returns a function with signature `func(self, *args, **kwds)` which will create a shared object using the manager `self` and return a proxy for it. The shared objects will be created by evaluating `callable(*args, **kwds)` in the manager process. The arguments are: `callable` The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored. `proxytype` The type of proxy which will be used for object returned by `callable`. If `proxytype` is `None` then each time an object is returned by `callable` either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the `exposed` argument, see below. `exposed` Given a shared object returned by `callable`, the `exposed` argument is the list of those method names which should be exposed via |callmethod|_. [#]_ [#]_ If `exposed` is `None` and `callable.__exposed__` exists then `callable.__exposed__` is used instead. If `exposed` is `None` and `callable.__exposed__` does not exist then all methods of the shared object which do not start with `'_'` will be exposed. An attempt to use |callmethod| with a method name which is not exposed will raise an exception. `typeid` If `typeid` is a string then it is used as an identifier for the callable. Otherwise, `typeid` must be `None` and a string prefixed by `callable.__name__` is used as the identifier. .. |callmethod| replace:: ``BaseProxy._callMethod()`` .. _callmethod: proxy-objects.html#methods-of-baseproxy .. [#] A method here means any attribute which has a `__call__` attribute. .. [#] The method names `__repr__`, `__str__`, and `__cmp__` of a shared object are always exposed by the manager. However, instead of invoking the `__repr__()`, `__str__()`, `__cmp__()` instance methods (none of which are guaranteed to exist) they invoke the builtin functions `repr()`, `str()` and `cmp()`. Note that one should generally avoid exposing rich comparison methods like `__eq__()`, `__ne__()`, `__le__()`. To make the proxy type support comparison by value one can just expose `__cmp__()` instead (even if the referent does not have such a method). 
Example ------- :: from processing.managers import BaseManager, CreatorMethod class FooClass(object): def bar(self): print 'BAR' def baz(self): print 'BAZ' class NewManager(BaseManager): Foo = CreatorMethod(FooClass) if __name__ == '__main__': manager = NewManager() manager.start() foo = manager.Foo() foo.bar() # prints 'BAR' foo.baz() # prints 'BAZ' manager.shutdown() See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples. Using a remote manager ====================== It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a shared queue which remote clients can use:: >>> from processing.managers import BaseManager, CreatorMethod >>> import Queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy') ... >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none') >>> m.serveForever() One client can access the server as follows:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.put('hello') Another client can also use it:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.get() 'hello' .. _Prev: connection-objects.html .. _Up: processing-ref.html .. _Next: proxy-objects.html uqfoundation-multiprocess-b3457a5/py3.13/doc/pool-objects.html000066400000000000000000000265511455552142400243210ustar00rootroot00000000000000 Process Pools
Prev         Up         Next

Process Pools

The processing.pool module has one public class:

class Pool(processes=None, initializer=None, initargs=())

A class representing a pool of worker processes.

Tasks can be offloaded to the pool and the results dealt with when they become available.

Note that tasks can only be submitted (or retrieved) by the process which created the pool object.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

Pool objects

Pool has the following public methods:

__init__(processes=None)
The constructor creates and starts processes worker processes. If processes is None then cpuCount() is used to find a default, or 1 if cpuCount() raises NotImplementedError.
apply(func, args=(), kwds={})
Equivalent of the apply() builtin function. It blocks till the result is ready.
applyAsync(func, args=(), kwds={}, callback=None)

A variant of the apply() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

map(func, iterable, chunksize=None)

A parallel equivalent of the map() builtin function. It blocks till the result is ready.

This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.

mapAsync(func, iterable, chunksize=None, callback=None)

A variant of the map() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

imap(func, iterable, chunksize=1)

An equivalent of itertools.imap().

The chunksize argument is the same as the one used by the map() method. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1.

Also if chunksize is 1 then the next() method of the iterator returned by the imap() method has an optional timeout parameter: next(timeout) will raise processing.TimeoutError if the result cannot be returned within timeout seconds.

imapUnordered(func, iterable, chunksize=1)
The same as imap() except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".)
close()
Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
terminate()
Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected terminate() will be called immediately.
join()
Wait for the worker processes to exit. One must call close() or terminate() before using join().

Asynchronous result objects

The result objects returned by applyAsync() and mapAsync() have the following public methods:

get(timeout=None)
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then processing.TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
wait(timeout=None)
Waits until the result is available or until timeout seconds pass.
ready()
Returns whether the call has completed.
successful()
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.

Examples

The following example demonstrates the use of a pool:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes

    result = pool.applyAsync(f, (10,))    # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow

    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

    it = pool.imap(f, range(10))
    print it.next()                       # prints "0"
    print it.next()                       # prints "1"
    print it.next(timeout=1)              # prints "4" unless your computer is *very* slow

    import time
    result = pool.applyAsync(time.sleep, (10,))
    print result.get(timeout=1)           # raises `TimeoutError`

See also ex_pool.py.
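
The example above does not use the callback variants. The following is an illustrative sketch (an addition, not from the original documentation) of mapAsync() with a callback, assuming the same API:

from processing import Pool

def cube(x):
    return x*x*x

results = []

def collect(result):
    results.extend(result)          # runs in the result-handling thread, so keep it quick

if __name__ == '__main__':
    pool = Pool(processes=2)
    r = pool.mapAsync(cube, range(5), callback=collect)
    r.wait()                        # block until the result (and hence the callback) is ready
    print results                   # should print "[0, 1, 8, 27, 64]"
    pool.close()
    pool.join()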

uqfoundation-multiprocess-b3457a5/py3.13/doc/pool-objects.txt000066400000000000000000000136411455552142400241700ustar00rootroot00000000000000.. include:: header.txt =============== Process Pools =============== The `processing.pool` module has one public class: **class** `Pool(processes=None, initializer=None, initargs=())` A class representing a pool of worker processes. Tasks can be offloaded to the pool and the results dealt with when they become available. Note that tasks can only be submitted (or retrieved) by the process which created the pool object. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. Pool objects ============ `Pool` has the following public methods: `__init__(processes=None)` The constructor creates and starts `processes` worker processes. If `processes` is `None` then `cpuCount()` is used to find a default or 1 if `cpuCount()` raises `NotImplemented`. `apply(func, args=(), kwds={})` Equivalent of the `apply()` builtin function. It blocks till the result is ready. `applyAsync(func, args=(), kwds={}, callback=None)` A variant of the `apply()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `map(func, iterable, chunksize=None)` A parallel equivalent of the `map()` builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting `chunksize` to a positive integer. `mapAsync(func, iterable, chunksize=None, callback=None)` A variant of the `map()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `imap(func, iterable, chunksize=1)` An equivalent of `itertools.imap()`. The `chunksize` argument is the same as the one used by the `map()` method. For very long iterables using a large value for `chunksize` can make make the job complete **much** faster than using the default value of `1`. Also if `chunksize` is `1` then the `next()` method of the iterator returned by the `imap()` method has an optional `timeout` parameter: `next(timeout)` will raise `processing.TimeoutError` if the result cannot be returned within `timeout` seconds. `imapUnordered(func, iterable, chunksize=1)` The same as `imap()` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) `close()` Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. `terminate()` Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected `terminate()` will be called immediately. 
`join()` Wait for the worker processes to exit. One must call `close()` or `terminate()` before using `join()`. Asynchronous result objects =========================== The result objects returns by `applyAsync()` and `mapAsync()` have the following public methods: `get(timeout=None)` Returns the result when it arrives. If `timeout` is not `None` and the result does not arrive within `timeout` seconds then `processing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by `get()`. `wait(timeout=None)` Waits until the result is available or until `timeout` seconds pass. `ready()` Returns whether the call has completed. `successful()` Returns whether the call completed without raising an exception. Will raise `AssertionError` if the result is not ready. Examples ======== The following example demonstrates the use of a pool:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, (10,)) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print it.next() # prints "0" print it.next() # prints "1" print it.next(timeout=1) # prints "4" unless your computer is *very* slow import time result = pool.applyAsync(time.sleep, (10,)) print result.get(timeout=1) # raises `TimeoutError` See also `ex_pool.py <../examples/ex_pool.py>`_. .. _Prev: proxy-objects.html .. _Up: processing-ref.html .. _Next: sharedctypes.html uqfoundation-multiprocess-b3457a5/py3.13/doc/process-objects.html000066400000000000000000000235741455552142400250300ustar00rootroot00000000000000 Process objects
Prev         Up         Next

Process objects

Process objects represent activity that is run in a separate process.

Process

The Process class has equivalents of all the methods of threading.Thread:

__init__(group=None, target=None, name=None, args=(), kwargs={})

This constructor should always be called with keyword arguments. Arguments are:

group
should be None; exists for compatibility with threading.Thread.
target
is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name
is the process name. By default, a unique name is constructed of the form 'Process-N1:N2:...:Nk' where N1,N2,...,Nk is a sequence of integers whose length is determined by the generation of the process.
args
is the argument tuple for the target invocation. Defaults to ().
kwargs
is a dictionary of keyword arguments for the target invocation. Defaults to {}.

If a subclass overrides the constructor, it must make sure it invokes the base class constructor (Process.__init__()) before doing anything else to the process.

run()

Method representing the process's activity.

You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.

start()

Start the process's activity.

This must be called at most once per process object. It arranges for the object's run() method to be invoked in a separate process.

join(timeout=None)

This blocks the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.

If timeout is None then there is no timeout.

A process can be joined many times.

A process cannot join itself because this would cause a deadlock.

It is an error to attempt to join a process before it has been started.

getName()
Return the process's name.
setName(name)

Set the process's name.

The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor.

isAlive()

Return whether the process is alive.

Roughly, a process object is alive from the moment the start() method returns until the child process terminates.

isDaemon()
Return the process's daemon flag.
setDaemon(daemonic)

Set the process's daemon flag to the Boolean value daemonic. This must be called before start() is called.

The initial value is inherited from the creating process.

When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes.

In addition process objects also support the following methods.

getPid()
Return the process ID. Before the process is spawned this will be None.
getExitCode()
Return the child's exit code. This will be None if the process has not yet terminated. A negative value -N indicates that the child was terminated by signal N.
getAuthKey()

Return the process's authentication key (a string).

When the processing package is initialized the main process is assigned a random hexadecimal string.

When a Process object is created it will inherit the authentication key of its parent process, although this may be changed using setAuthKey() below.

See Authentication Keys.

setAuthKey(authkey)
Set the process's authentication key which must be a string.
terminate()

Terminate the process. On Unix this is done using the SIGTERM signal and on Windows TerminateProcess() is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will not be terminated.

Warning

If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock.

Note that the start(), join(), isAlive() and getExitCode() methods should only be called by the process that created the process object.

Example

Example usage of some of the methods of Process:

>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
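
As an illustrative addition (not part of the original documentation), a Process subclass that overrides run(), following the constructor rules described above:

from processing import Process

class Greeter(Process):
    def __init__(self, whom):
        Process.__init__(self)      # always invoke the base constructor first
        self.whom = whom
    def run(self):
        print 'hello', self.whom    # executed in the child process

if __name__ == '__main__':
    p = Greeter('world')
    p.start()                       # prints 'hello world' from the child
    p.join()
    print p.getExitCode()           # should print "0"
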
uqfoundation-multiprocess-b3457a5/py3.13/doc/process-objects.txt000066400000000000000000000136131455552142400246740ustar00rootroot00000000000000.. include:: header.txt ================= Process objects ================= Process objects represent activity that is run in a separate process. Process ======= The `Process` class has equivalents of all the methods of `threading.Thread`: `__init__(group=None, target=None, name=None, args=(), kwargs={})` This constructor should always be called with keyword arguments. Arguments are: `group` should be `None`; exists for compatibility with `threading.Thread`. `target` is the callable object to be invoked by the `run()` method. Defaults to None, meaning nothing is called. `name` is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. `args` is the argument tuple for the target invocation. Defaults to `()`. `kwargs` is a dictionary of keyword arguments for the target invocation. Defaults to `{}`. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (`Process.__init__()`) before doing anything else to the process. `run()` Method representing the process's activity. You may override this method in a subclass. The standard `run()` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the `args` and `kwargs` arguments, respectively. `start()` Start the process's activity. This must be called at most once per process object. It arranges for the object's `run()` method to be invoked in a separate process. `join(timeout=None)` This blocks the calling thread until the process whose `join()` method is called terminates or until the optional timeout occurs. If `timeout` is `None` then there is no timeout. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. `getName()` Return the process's name. `setName(name)` Set the process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. `isAlive()` Return whether the process is alive. Roughly, a process object is alive from the moment the `start()` method returns until the child process terminates. `isDaemon()` Return the process's daemon flag. `setDaemon(daemonic)` Set the process's daemon flag to the Boolean value `daemonic`. This must be called before `start()` is called. The initial value is inherited from the creating process. When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes. In addition process objects also support the following methods. `getPid()` Return the process ID. Before the process is spawned this will be `None`. `getExitCode()` Return the child's exit code. This will be `None` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. `getAuthKey()` Return the process's authentication key (a string). When the `processing` package is initialized the main process is assigned a random hexadecimal string. 
When a `Process` object is created it will inherit the authentication key of its parent process, although this may be changed using `setAuthKey()` below. See `Authentication Keys `_. `setAuthKey(authkey)` Set the process's authentication key which must be a string. `terminate()` Terminate the process. On Unix this is done using the `SIGTERM` signal and on Windows `TerminateProcess()` is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will *not* be terminates. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the `start()`, `join()`, `isAlive()` and `getExitCode()` methods should only be called by the process that created the process object. Example ======= Example usage of some of the methods of `Process`:: >>> import processing, time, signal >>> p = processing.Process(target=time.sleep, args=(1000,)) >>> print p, p.isAlive() False >>> p.start() >>> print p, p.isAlive() True >>> p.terminate() >>> print p, p.isAlive() False >>> p.getExitCode() == -signal.SIGTERM True .. _Prev: processing-ref.html .. _Up: processing-ref.html .. _Next: queue-objects.html uqfoundation-multiprocess-b3457a5/py3.13/doc/processing-ref.html000066400000000000000000000573611455552142400246520ustar00rootroot00000000000000 processing package reference
Prev         Up         Next

processing package reference

The processing package mostly replicates the API of the threading module.

Classes and exceptions

class Process(group=None, target=None, name=None, args=(), kwargs={})

An analogue of threading.Thread.

See Process objects.

exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Pipes and Queues

When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks.

For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers).

Note that one can also create a shared queue by using a manager object -- see Managers.

For an example of the usage of queues for interprocess communication see ex_workers.py.

Pipe(duplex=True)

Returns a pair (conn1, conn2) of connection objects representing the ends of a pipe.

If duplex is true then the pipe is two way; otherwise conn1 can only be used for receiving messages and conn2 can only be used for sending messages.

See Connection objects.

Queue(maxsize=0)

Returns a process shared queue object. The usual Empty and Full exceptions from the standard library's Queue module are raised to signal timeouts.

See Queue objects.
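
As an illustrative sketch (not part of the original reference), a pipe and a queue used together for simple message passing; it assumes the send()/recv() connection methods and the Queue API described elsewhere in this reference:

from processing import Process, Pipe, Queue

def echo(conn, q):
    msg = conn.recv()               # receive an object from the parent end of the pipe
    q.put(msg.upper())              # send a reply back through the queue

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    q = Queue()
    p = Process(target=echo, args=(child_conn, q))
    p.start()
    parent_conn.send('hello')
    print q.get()                   # should print "HELLO"
    p.join()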

Synchronization primitives

Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the standard library's threading module.

Note that one can also create synchronization primitives by using a manager object -- see Managers.

BoundedSemaphore(value=1)

Returns a bounded semaphore object: a clone of threading.BoundedSemaphore.

(On Mac OSX this is indistinguishable from Semaphore() because sem_getvalue() is not implemented on that platform).

Condition(lock=None)

Returns a condition variable: a clone of threading.Condition.

If lock is specified then it should be a Lock or RLock object from processing.

Event()
Returns an event object: a clone of threading.Event.
Lock()
Returns a non-recursive lock object: a clone of threading.Lock.
RLock()
Returns a recursive lock object: a clone of threading.RLock.
Semaphore(value=1)
Returns a semaphore object: a clone of threading.Semaphore.

Acquiring with a timeout

The acquire() method of BoundedSemaphore, Lock, RLock and Semaphore has a timeout parameter not supported by the equivalents in threading. The signature is acquire(block=True, timeout=None) with keyword parameters being acceptable. If block is true and timeout is not None then it specifies a timeout in seconds. If block is false then timeout is ignored.
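
For example (an illustrative sketch, not part of the original reference; it assumes acquire() returns a boolean indicating success, as in threading):

from processing import Lock

lock = Lock()
lock.acquire()                              # the lock is now held
if lock.acquire(block=True, timeout=0.5):   # a second acquire can only time out
    print 'acquired'
else:
    print 'timed out'                       # should print "timed out" after ~0.5s
lock.release()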

Interrupting the main thread

If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to BoundedSemaphore.acquire(), Lock.acquire(), RLock.acquire(), Semaphore.acquire(), Condition.acquire() or Condition.wait() then the call will be immediately interrupted and KeyboardInterrupt will be raised.

This differs from the behaviour of threading where SIGINT will be ignored while the equivalent blocking calls are in progress.

Shared Objects

It is possible to create shared objects using shared memory which can be inherited by child processes.

Value(typecode_or_type, *args, **, lock=True)

Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Array(typecode_or_type, size_or_initializer, **, lock=True)

Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library.

See also sharedctypes.
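
An illustrative sketch (not part of the original reference) of sharing a Value and an Array with a child process, using the typecodes described above:

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)               # shared double, wrapped with a lock by default
    arr = Array('i', range(10))         # shared array of ints initialized from a sequence

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value                     # should print "3.1415927"
    print arr[:]                        # should print "[0, -1, -2, ..., -9]"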

Managers

Managers provide a way to create data which can be shared between different processes.

Manager()

Returns a started SyncManager object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies.

The methods for creating shared objects are

list(), dict(), Namespace(), Value(), Array(), Lock(), RLock(), Semaphore(), BoundedSemaphore(), Condition(), Event(), Queue().

See SyncManager.

It is possible to create managers which support other types -- see Customized managers.

Process Pools

One can create a pool of processes which will carry out tasks submitted to it.

Pool(processes=None, initializer=None, initargs=())

Returns a process pool object which controls a pool of worker processes to which jobs can be submitted.

It supports asynchronous results with timeouts and callbacks and has a parallel map implementation.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

See Pool objects.

Logging

Some support for logging is available. Note, however, that the logging package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up.

enableLogging(level, HandlerType=None, handlerArgs=(), format=None)

Enables logging and sets the debug level used by the package's logger to level. See documentation for the logging module in the standard library.

If HandlerType is specified then a handler is created using HandlerType(*handlerArgs) and this will be used by the logger -- any previous handlers will be discarded. If format is specified then this will be used for the handler; otherwise format defaults to '[%(levelname)s/%(processName)s] %(message)s'. (The logger used by processing allows use of the non-standard '%(processName)s' format.)

If HandlerType is not specified and the logger has no handlers then a default one is created which prints to sys.stderr.

Note: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call enableLogging() with the same arguments which were used when its parent process last called enableLogging() (if it ever did).

getLogger()
Returns the logger used by processing. If enableLogging() has not yet been called then None is returned.

Below is an example session with logging turned on:

>>> import processing, logging
>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] manager exiting with exitcode 0

Miscellaneous

activeChildren()

Return list of all live children of the current process.

Calling this has the side effect of "joining" any processes which have already finished.

cpuCount()
Returns the number of CPUs in the system. May raise NotImplementedError.
currentProcess()

An analogue of threading.current_thread().

Returns the object corresponding to the current process.

freezeSupport()

Adds support for when a program which uses the processing package has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example

from processing import Process, freezeSupport

def f():
    print 'hello world!'

if __name__ == '__main__':
    freezeSupport()
    Process(target=f).start()

If the freezeSupport() line is missed out then trying to run the frozen executable will raise RuntimeError.

If the module is being run normally by the python interpreter then freezeSupport() has no effect.

Note

  • The processing.dummy package replicates the API of processing but is no more than a wrapper around the threading module.
  • processing contains no analogues of activeCount, enumerate, settrace, setprofile, Timer, or local from the threading module.
uqfoundation-multiprocess-b3457a5/py3.13/doc/processing-ref.txt000066400000000000000000000310141455552142400245100ustar00rootroot00000000000000.. include:: header.txt ============================== processing package reference ============================== The `processing` package mostly replicates the API of the `threading` module. Classes and exceptions ---------------------- **class** `Process(group=None, target=None, name=None, args=(), kwargs={})` An analogue of `threading.Thread`. See `Process objects`_. **exception** `BufferTooShort` Exception raised by the `recvBytesInto()` method of a `connection object `_ when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Pipes and Queues ---------------- When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers). Note that one can also create a shared queue by using a manager object -- see `Managers`_. For an example of the usage of queues for interprocess communication see `ex_workers.py <../examples/ex_workers.py>`_. `Pipe(duplex=True)` Returns a pair `(conn1, conn2)` of connection objects representing the ends of a pipe. If `duplex` is true then the pipe is two way; otherwise `conn1` can only be used for receiving messages and `conn2` can only be used for sending messages. See `Connection objects `_. `Queue(maxsize=0)` Returns a process shared queue object. The usual `Empty` and `Full` exceptions from the standard library's `Queue` module are raised to signal timeouts. See `Queue objects `_. Synchronization primitives -------------------------- Generally synchronization primitives are not as necessary in a multiprocess program as they are in a mulithreaded program. See the documentation for the standard library's `threading` module. Note that one can also create synchronization primitves by using a manager object -- see `Managers`_. `BoundedSemaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.BoundedSemaphore`. (On Mac OSX this is indistiguishable from `Semaphore()` because `sem_getvalue()` is not implemented on that platform). `Condition(lock=None)` Returns a condition variable: a clone of `threading.Condition`. If `lock` is specified then it should be a `Lock` or `RLock` object from `processing`. `Event()` Returns an event object: a clone of `threading.Event`. `Lock()` Returns a non-recursive lock object: a clone of `threading.Lock`. `RLock()` Returns a recursive lock object: a clone of `threading.RLock`. `Semaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.Semaphore`. .. admonition:: Acquiring with a timeout The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and `Semaphore` has a timeout parameter not supported by the equivalents in `threading`. The signature is `acquire(block=True, timeout=None)` with keyword parameters being acceptable. If `block` is true and `timeout` is not `None` then it specifies a timeout in seconds. If `block` is false then `timeout` is ignored. .. 
admonition:: Interrupting the main thread If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to `BoundedSemaphore.acquire()`, `Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()`, `Condition.acquire()` or `Condition.wait()` then the call will be immediately interrupted and `KeyboardInterrupt` will be raised. This differs from the behaviour of `threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared Objects -------------- It is possible to create shared objects using shared memory which can be inherited by child processes. `Value(typecode_or_type, *args, **, lock=True)` Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Array(typecode_or_type, size_or_initializer, **, lock=True)` Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library. See also `sharedctypes `_. Managers -------- Managers provide a way to create data which can be shared between different processes. `Manager()` Returns a started `SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. The methods for creating shared objects are `list()`, `dict()`, `Namespace()`, `Value()`, `Array()`, `Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`. See `SyncManager `_. It is possible to create managers which support other types -- see `Customized managers `_. Process Pools ------------- One can create a pool of processes which will carry out tasks submitted to it. 
`Pool(processes=None, initializer=None, initargs=())` Returns a process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. See `Pool objects `_. Logging ------- Some support for logging is available. Note, however, that the `logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. `enableLogging(level, HandlerType=None, handlerArgs=(), format=None)` Enables logging and sets the debug level used by the package's logger to `level`. See documentation for the `logging` module in the standard library. If `HandlerType` is specified then a handler is created using `HandlerType(*handlerArgs)` and this will be used by the logger -- any previous handlers will be discarded. If `format` is specified then this will be used for the handler; otherwise `format` defaults to `'[%(levelname)s/%(processName)s] %(message)s'`. (The logger used by `processing` allows use of the non-standard `'%(processName)s'` format.) If `HandlerType` is not specified and the logger has no handlers then a default one is created which prints to `sys.stderr`. *Note*: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call `enableLogging()` with the same arguments which were used when its parent process last called `enableLogging()` (if it ever did). `getLogger()` Returns the logger used by `processing`. If `enableLogging()` has not yet been called then `None` is returned. Below is an example session with logging turned on:: >>> import processing, logging >>> processing.enableLogging(level=logging.INFO) >>> processing.getLogger().warning('doomed') [WARNING/MainProcess] doomed >>> m = processing.Manager() [INFO/SyncManager-1] child process calling self.run() [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-1] manager received shutdown message [INFO/SyncManager-1] manager exiting with exitcode 0 Miscellaneous ------------- `activeChildren()` Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. `cpuCount()` Returns the number of CPUs in the system. May raise `NotImplementedError`. `currentProcess()` An analogue of `threading.current_thread()`. Returns the object corresponding to the current process. `freezeSupport()` Adds support for when a program which uses the `processing` package has been frozen to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) One needs to call this function straight after the `if __name__ == '__main__'` line of the main module. For example :: from processing import Process, freezeSupport def f(): print 'hello world!' if __name__ == '__main__': freezeSupport() Process(target=f).start() If the `freezeSupport()` line is missed out then trying to run the frozen executable will raise `RuntimeError`. If the module is being run normally by the python interpreter then `freezeSupport()` has no effect. .. 
note:: * The `processing.dummy` package replicates the API of `processing` but is no more than a wrapper around the `threading` module. * `processing` contains no analogues of `activeCount`, `enumerate`, `settrace`, `setprofile`, `Timer`, or `local` from the `threading` module. Subsections ----------- + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes object `_ + `Listeners and Clients `_ .. _Prev: intro.html .. _Up: index.html .. _Next: process-objects.html uqfoundation-multiprocess-b3457a5/py3.13/doc/programming-guidelines.html000066400000000000000000000214551455552142400263670ustar00rootroot00000000000000 Programming guidelines
Prev         Up         Next

Programming guidelines

There are certain guidelines and idioms which should be adhered to when using the processing package.

All platforms

Avoid shared state

As far as possible one should try to avoid shifting large amounts of data between processes.

It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the threading module.

Picklability:
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies:

Do not use a proxy object from more than one thread unless you protect it with a lock.

(There is never a problem with different processes using the 'same' proxy.)

Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or activeChildren() is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's isAlive() will join the process. Even so it is probably good practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many of the types from the processing package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes

Using the terminate() method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes.

Therefore it is probably best to only consider using terminate() on processes which never use any shared resources.

Joining processes that use queues

Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the cancelJoin() method of the queue to avoid this behaviour.)

This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will automatically be joined.

An example which will deadlock is the following:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # this deadlocks
    obj = queue.get()

A fix here would be to swap the last two lines round (or simply remove the p.join() line).
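For clarity, here is the corrected ordering; the only change from the example above is that the item is consumed before the join:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    obj = queue.get()           # drain the queue first
    p.join()                    # now the join cannot deadlock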

Explicitly pass resources to child processes

On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process.

Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process.

So for instance

from processing import Process, Lock

def f():
    ... do something using "lock" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f).start()

should be rewritten as

from processing import Process, Lock

def f(l):
    ... do something using "l" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f, args=(lock,)).start()

Windows

Since Windows lacks os.fork() it has a few extra restrictions:

More picklability:

Ensure that all arguments to Process.__init__() are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the target argument on Windows --- just define a function and use that instead.

Also, if you subclass Process then make sure that instances will be picklable when the start() method is called.
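As an illustration (a minimal sketch, not taken from the original text -- the Worker class and filename are hypothetical), a Process subclass stays picklable if it only stores simple picklable attributes in __init__() and defers unpicklable resources, such as open files, to run():

from processing import Process

class Worker(Process):
    def __init__(self, filename):
        Process.__init__(self)
        self.filename = filename    # a plain string is picklable

    def run(self):
        # open the file in the child; storing an open file object on
        # self in the parent would make the instance unpicklable
        f = open(self.filename)
        try:
            print f.read()
        finally:
            f.close()

if __name__ == '__main__':
    w = Worker('data.txt')          # hypothetical input file
    w.start()
    w.join()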

Global variables:

Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that start() was called.

However, global variables which are just module level constants cause no problems.
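A small hypothetical example of the point above: the global below is rebound in the parent only after import, so on Windows a child process (which re-imports the module) still sees the original value, whereas on Unix a forked child would see the parent's value.

from processing import Process

counter = 0                 # module level global

def show():
    print counter           # prints 0 on Windows, 10 on Unix

if __name__ == '__main__':
    counter = 10            # rebinding only happens in the parent
    p = Process(target=show)
    p.start()
    p.join()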

Safe importing of main module:

Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).

For example, under Windows running the following module would fail with a RuntimeError:

from processing import Process

def foo():
    print 'hello'

p = Process(target=foo)
p.start()

Instead one should protect the "entry point" of the program by using if __name__ == '__main__': as follows:

from processing import Process

def foo():
    print 'hello'

if __name__ == '__main__':
    freezeSupport()
    p = Process(target=foo)
    p.start()

(The freezeSupport() line can be omitted if the program will be run normally instead of frozen.)

This allows the newly spawned Python interpreter to safely import the module and then run the module's foo() function.

Similar restrictions apply if a pool or manager is created in the main module.
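For example (a minimal sketch), a pool created in the main module should likewise only be created under the __main__ guard:

from processing import Pool, freezeSupport

def square(x):
    return x*x

if __name__ == '__main__':
    freezeSupport()
    pool = Pool(processes=4)
    print pool.map(square, range(10))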

uqfoundation-multiprocess-b3457a5/py3.13/doc/programming-guidelines.txt000066400000000000000000000150221455552142400262330ustar00rootroot00000000000000.. include:: header.txt ======================== Programming guidelines ======================== There are certain guidelines and idioms which should be adhered to when using the `processing` package. All platforms ------------- *Avoid shared state* As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the `threading` module. *Picklability*: Ensure that the arguments to the methods of proxies are picklable. *Thread safety of proxies*: Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the 'same' proxy.) *Joining zombie processes* On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or `activeChildren()` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's `isAlive()` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. *Better to inherit than pickle/unpickle* On Windows many of types from the `processing` package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which need access to a shared resource created elsewhere can inherit it from an ancestor process. *Avoid terminating processes* Using the `terminate()` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using `terminate()` on processes which never use any shared resources. *Joining processes that use queues* Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the `cancelJoin()` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from processing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the `p.join()` line). *Explicity pass resources to child processes* On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. 
Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from processing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from processing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Windows ------- Since Windows lacks `os.fork()` it has a few extra restrictions: *More picklability*: Ensure that all arguments to `Process.__init__()` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the `target` argument on Windows --- just define a function and use that instead. Also, if you subclass `Process` then make sure that instances will be picklable when the `start()` method is called. *Global variables*: Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that `start()` was called. However, global variables which are just module level constants cause no problems. *Safe importing of main module*: Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a `RuntimeError`:: from processing import Process def foo(): print 'hello' p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using `if __name__ == '__main__':` as follows:: from processing import Process def foo(): print 'hello' if __name__ == '__main__': freezeSupport() p = Process(target=foo) p.start() (The `freezeSupport()` line can be ommitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's `foo()` function. Similar restrictions apply if a pool or manager is created in the main module. .. _Prev: connection-ref.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.13/doc/proxy-objects.html000066400000000000000000000175771455552142400245410ustar00rootroot00000000000000 Proxy objects
Prev         Up         Next

Proxy objects

A proxy is an object which refers to a shared object which lives (presumably) in a different process. The shared object is said to be the referent of the proxy. Multiple proxy objects may have the same referent.

A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can:

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<Proxy[list] object at 0x00DFA230>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
>>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
True

Notice that applying str() to a proxy will return the representation of the referent, whereas applying repr() will return the representation of the proxy.

An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:

>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b)         # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']

Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the for statement:

>>> a = manager.dict([(i*i, i) for i in range(10)])
>>> for key in a:
...     print '<%r,%r>' % (key, a[key]),
...
<0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6>

Note

Although list and dict proxy objects are iterable, it will be much more efficient to iterate over a copy of the referent, for example

for item in some_list[:]:
    ...

and

for key in some_dict.keys():
    ...
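For instance (a minimal sketch, not from the original text), copying the referent once and iterating over the copy replaces one remote call per element with a single call:

from processing import Manager

manager = Manager()
some_list = manager.list(range(1000))

total = 0
for item in some_list[:]:   # a single remote call copies the whole list
    total += item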

Methods of BaseProxy

Proxy objects are instances of subclasses of BaseProxy. The only semi-public methods of BaseProxy are the following:

_callMethod(methodname, args=(), kwds={})

Call and return the result of a method of the proxy's referent.

If proxy is a proxy whose referent is obj then the expression

proxy._callMethod(methodname, args, kwds)

will evaluate the expression

getattr(obj, methodname)(*args, **kwds)         (*)

in the manager's process.

The returned value will be either a copy of the result of (*) or if the result is an unpicklable iterator then a proxy for the iterator.

If an exception is raised by (*) then it is re-raised by _callMethod(). If some other exception is raised in the manager's process then this is converted into a RemoteError exception and is raised by _callMethod().

Note in particular that an exception will be raised if methodname has not been exposed --- see the exposed argument to CreatorMethod.

_getValue()

Return a copy of the referent.

If the referent is unpicklable then this will raise an exception.

__repr__
Return a representation of the proxy object.
__str__
Return the representation of the referent.

Cleanup

A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent.

A shared object gets deleted from the manager process when there are no longer any proxies referring to it.

Examples

An example of the usage of _callMethod():

>>> l = manager.list(range(10))
>>> l._callMethod('__getslice__', (2, 7))   # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._callMethod('__iter__')               # equiv to `iter(l)`
<Proxy[iter] object at 0x00DFAFF0>
>>> l._callMethod('__getitem__', (20,))     # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range

As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:

class IteratorProxy(BaseProxy):
    def __iter__(self):
        return self
    def next(self):
        return self._callMethod('next')
uqfoundation-multiprocess-b3457a5/py3.13/doc/proxy-objects.txt000066400000000000000000000115571455552142400244040ustar00rootroot00000000000000.. include:: header.txt =============== Proxy objects =============== A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that the its referent can:: >>> from processing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print l [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print repr(l) >>> l[4] 16 >>> l[2:5] [4, 9, 16] >>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] True Notice that applying `str()` to a proxy will return the representation of the referent, whereas applying `repr()` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of `a` now contains referent of `b` >>> print a, b [[]] [] >>> b.append('hello') >>> print a, b [['hello']] ['hello'] Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the `for` statement:: >>> a = manager.dict([(i*i, i) for i in range(10)]) >>> for key in a: ... print '<%r,%r>' % (key, a[key]), ... <0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6> .. note:: Although `list` and `dict` proxy objects are iterable, it will be much more efficient to iterate over a *copy* of the referent, for example :: for item in some_list[:]: ... and :: for key in some_dict.keys(): ... Methods of `BaseProxy` ====================== Proxy objects are instances of subclasses of `BaseProxy`. The only semi-public methods of `BaseProxy` are the following: `_callMethod(methodname, args=(), kwds={})` Call and return the result of a method of the proxy's referent. If `proxy` is a proxy whose referent is `obj` then the expression `proxy._callMethod(methodname, args, kwds)` will evaluate the expression `getattr(obj, methodname)(*args, **kwds)` |spaces| _`(*)` in the manager's process. The returned value will be either a copy of the result of `(*)`_ or if the result is an unpicklable iterator then a proxy for the iterator. If an exception is raised by `(*)`_ then then is re-raised by `_callMethod()`. If some other exception is raised in the manager's process then this is converted into a `RemoteError` exception and is raised by `_callMethod()`. Note in particular that an exception will be raised if `methodname` has not been *exposed* --- see the `exposed` argument to `CreatorMethod `_. `_getValue()` Return a copy of the referent. If the referent is unpicklable then this will raise an exception. `__repr__` Return a representation of the proxy object. `__str__` Return the representation of the referent. Cleanup ======= A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. 
A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Examples ======== An example of the usage of `_callMethod()`:: >>> l = manager.list(range(10)) >>> l._callMethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callMethod('__iter__') # equiv to `iter(l)` >>> l._callMethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:: class IteratorProxy(BaseProxy): def __iter__(self): return self def next(self): return self._callMethod('next') .. _Prev: manager-objects.html .. _Up: processing-ref.html .. _Next: pool-objects.html uqfoundation-multiprocess-b3457a5/py3.13/doc/queue-objects.html000066400000000000000000000227101455552142400244650ustar00rootroot00000000000000 Queue objects
Prev         Up         Next

Queue objects

The queue type provided by processing is a multi-producer, multi-consumer FIFO queue modelled on the Queue.Queue class in the standard library.

Queue(maxsize=0)

Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe.

processing.Queue implements all the methods of Queue.Queue except for qsize(), task_done() and join().

empty()
Return True if the queue is empty, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
full()
Return True if the queue is full, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
put(item, block=True, timeout=None)
Put item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).
put_nowait(item), putNoWait(item)
Equivalent to put(item, False).
get(block=True, timeout=None)
Remove and return an item from the queue. If optional args block is true and timeout is None (the default), block if necessary until an item is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the Empty exception (timeout is ignored in that case).
get_nowait(), getNoWait()
Equivalent to get(False).
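A small sketch of the timeout behaviour described above; note that, as mentioned below, the Empty and Full exceptions must be imported from the Queue module:

from Queue import Empty, Full
from processing import Process, Queue

def producer(q):
    try:
        q.put('work item', block=True, timeout=1)
    except Full:
        pass                        # no free slot appeared within a second

if __name__ == '__main__':
    q = Queue()
    p = Process(target=producer, args=(q,))
    p.start()
    try:
        item = q.get(timeout=1)     # raises Empty if nothing arrives in time
    except Empty:
        item = None
    p.join()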

processing.Queue has a few additional methods not found in Queue.Queue which are usually unnecessary:

putMany(iterable)
If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So q.putMany(X) is a faster alternative to for x in X: q.put(x). Raises an error if the queue has finite size.
close()
Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected.
joinThread()

This joins the background thread and can only be used after close() has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe.

By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call cancelJoin() to prevent this behaviour.

cancelJoin()
Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue.
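As an illustration (a sketch, not from the original text), a child process that must be able to exit promptly can call cancelJoin() before putting data on the queue; its parent can then join it without the deadlock described above, although buffered data may be lost:

from processing import Process, Queue

def worker(q):
    q.cancelJoin()          # child will not wait for the feeder thread at exit
    q.put('X' * 1000000)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    p.join()                # returns promptly, but buffered data may be lost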

Empty and Full

processing uses the usual Queue.Empty and Queue.Full exceptions to signal a timeout. They are not available in the processing namespace so you need to import them from Queue.

Warning

If a process is killed using the terminate() method or os.kill() while it is trying to use a Queue then the data in the queue is likely to become corrupted. This may cause other processes to get an exception when they try to use the queue later on.

Warning

As mentioned above, if a child process has put items on a queue (and it has not used cancelJoin()) then that process will not terminate until all buffered items have been flushed to the pipe.

This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children.

Note that a queue created using a manager does not have this issue. See Programming Guidelines.
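For example (a minimal sketch), the deadlocking example from the Programming Guidelines works as written when the queue comes from a manager, because the data lives in the manager process rather than in a buffer inside the child:

from processing import Process, Manager

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    manager = Manager()
    queue = manager.Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # safe: no feeder thread in the child
    obj = queue.get()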

uqfoundation-multiprocess-b3457a5/py3.13/doc/queue-objects.txt000066400000000000000000000121211455552142400243330ustar00rootroot00000000000000.. include:: header.txt =============== Queue objects =============== The queue type provided by `processing` is a multi-producer, multi-consumer FIFO queue modelled on the `Queue.Queue` class in the standard library. `Queue(maxsize=0)` Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. `Queue.Queue` implements all the methods of `Queue.Queue` except for `qsize()`, `task_done()` and `join()`. `empty()` Return `True` if the queue is empty, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `full()` Return `True` if the queue is full, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `put(item, block=True, timeout=None)` Put item into the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Full` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the `Full` exception (`timeout` is ignored in that case). `put_nowait(item)`, `putNoWait(item)` Equivalent to `put(item, False)`. `get(block=True, timeout=None)` Remove and return an item from the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Empty` exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the `Empty` exception (`timeout` is ignored in that case). `get_nowait()`, `getNoWait()` Equivalent to `get(False)`. `processing.Queue` has a few additional methods not found in `Queue.Queue` which are usually unnecessary: `putMany(iterable)` If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So `q.putMany(X)` is a faster alternative to `for x in X: q.put(x)`. Raises an error if the queue has finite size. `close()` Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. `joinThread()` This joins the background thread and can only be used after `close()` has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call `cancelJoin()` to prevent this behaviour. `cancelJoin()` Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue. .. admonition:: `Empty` and `Full` `processing` uses the usual `Queue.Empty` and `Queue.Full` exceptions to signal a timeout. They are not available in the `processing` namespace so you need to import them from `Queue`. .. 
warning:: If a process is killed using the `terminate()` method or `os.kill()` while it is trying to use a `Queue` then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used `cancelJoin()`) then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all it non-daemonic children. Note that a queue created using a manager does not have this issue. See `Programming Guidelines `_. .. _Prev: process-objects.html .. _Up: processing-ref.html .. _Next: connection-objects.html uqfoundation-multiprocess-b3457a5/py3.13/doc/sharedctypes.html000066400000000000000000000241571455552142400244170ustar00rootroot00000000000000 Shared ctypes objects
Prev         Up         Next

Shared ctypes objects

The processing.sharedctypes module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the ctypes package.)

The functions in the module are

RawArray(typecode_or_type, size_or_initializer)

Returns a ctypes array allocated from shared memory.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock.

RawValue(typecode_or_type, *args)

Returns a ctypes object allocated from shared memory.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see documentation for ctypes.

Array(typecode_or_type, size_or_initializer, **, lock=True)

The same as RawArray() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Value(typecode_or_type, *args, **, lock=True)

The same as RawValue() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes object.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

copy(obj)
Returns a ctypes object allocated from shared memory which is a copy of the ctypes object obj.
synchronized(obj, lock=None)

Returns a process-safe wrapper object for a ctypes object which uses lock to synchronize access. If lock is None then a processing.RLock object is created automatically.

A synchronized wrapper will have two methods in addition to those of the object it wraps: getobj() returns the wrapped object and getlock() returns the lock object used for synchronization.

Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object.
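As an illustration (a sketch based on the description above, assuming the wrapper forwards the array's item access as it does in the Example section below), a raw array can be wrapped after creation and several updates made under a single lock acquisition:

from processing.sharedctypes import RawArray, synchronized

arr = synchronized(RawArray('i', 10))   # wrapped with a processing.RLock

lock = arr.getlock()
lock.acquire()
try:
    for i in range(10):
        arr[i] = i*i                    # several updates, one acquisition
finally:
    lock.release()

raw = arr.getobj()                      # the underlying ctypes array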

Equivalences

The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table MyStruct is some subclass of ctypes.Structure.)

ctypes sharedctypes using type sharedctypes using typecode
c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4)
MyStruct(4, 6) RawValue(MyStruct, 4, 6)  
(c_short * 7)() RawArray(c_short, 7) RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))

Example

Below is an example where a number of ctypes objects are modified by a child process

from processing import Process, Lock
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', 'hello world', lock=lock)
    A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print n.value
    print x.value
    print s.value
    print [(p.x, p.y) for p in A]

The results printed are

49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]

Avoid sharing pointers

Although it is possible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash.

uqfoundation-multiprocess-b3457a5/py3.13/doc/sharedctypes.txt000066400000000000000000000143071455552142400242660ustar00rootroot00000000000000.. include:: header.txt ======================== Shared ctypes objects ======================== The `processing.sharedctypes` module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the `ctypes` package.) The functions in the module are `RawArray(typecode_or_type, size_or_initializer)` Returns a ctypes array allocated from shared memory. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock. `RawValue(typecode_or_type, *args)` Returns a ctypes object allocated from shared memory. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see documentation for `ctypes`. `Array(typecode_or_type, size_or_initializer, **, lock=True)` The same as `RawArray()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Value(typecode_or_type, *args, **, lock=True)` The same as `RawValue()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `copy(obj)` Returns a ctypes object allocated from shared memory which is a copy of the ctypes object `obj`. `synchronized(obj, lock=None)` Returns a process-safe wrapper object for a ctypes object which uses `lock` to synchronize access. If `lock` is `None` then a `processing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: `getobj()` returns the wrapped object and `getlock()` returns the lock object used for synchronization. 
Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. Equivalences ============ The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table `MyStruct` is some subclass of `ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Example ======= Below is an example where a number of ctypes objects are modified by a child process :: from processing import Process, Lock from processing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for p in A: p.x **= 2 p.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(ctypes.c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print n.value print x.value print s.value print [(p.x, p.y) for p in A] The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. admonition:: Avoid sharing pointers Although it is posible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. _Prev: pool-objects.html .. _Up: processing-ref.html .. _Next: connection-ref.html uqfoundation-multiprocess-b3457a5/py3.13/doc/tests.html000066400000000000000000000060761455552142400230630ustar00rootroot00000000000000 Tests and Examples
Prev         Up         Next

Tests and Examples

processing contains a test sub-package which contains unit tests for the package. You can do a test run by doing

python -m processing.tests

on Python 2.5 or

python -c "from processing.tests import main; main()"

on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager.

The example sub-package contains the following modules:

ex_newtype.py
Demonstration of how to create and use customized managers and proxies.
ex_pool.py
Test of the Pool class which represents a process pool.
ex_synchronize.py
Test of synchronization types like locks, conditions and queues.
ex_workers.py
A test showing how to use queues to feed tasks to a collection of worker processes and collect the results.
ex_webserver.py
An example of how a pool of worker processes can each run a SimpleHTTPServer.HttpServer instance while sharing a single listening socket.
benchmarks.py
Some simple benchmarks comparing processing with threading.
uqfoundation-multiprocess-b3457a5/py3.13/doc/tests.txt000066400000000000000000000027331455552142400227320ustar00rootroot00000000000000.. include:: header.txt Tests and Examples ================== `processing` contains a `test` sub-package which contains unit tests for the package. You can do a test run by doing :: python -m processing.tests on Python 2.5 or :: python -c "from processing.tests import main; main()" on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager. The `example` sub-package contains the following modules: `ex_newtype.py <../examples/ex_newtype.py>`_ Demonstration of how to create and use customized managers and proxies. `ex_pool.py <../examples/ex_pool.py>`_ Test of the `Pool` class which represents a process pool. `ex_synchronize.py <../examples/ex_synchronize.py>`_ Test of synchronization types like locks, conditions and queues. `ex_workers.py <../examples/ex_workers.py>`_ A test showing how to use queues to feed tasks to a collection of worker process and collect the results. `ex_webserver.py <../examples/ex_webserver.py>`_ An example of how a pool of worker processes can each run a `SimpleHTTPServer.HttpServer` instance while sharing a single listening socket. `benchmarks.py <../examples/benchmarks.py>`_ Some simple benchmarks comparing `processing` with `threading`. .. _Prev: programming-guidelines.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.13/doc/version.txt000066400000000000000000000000341455552142400232450ustar00rootroot00000000000000.. |version| replace:: 0.52 uqfoundation-multiprocess-b3457a5/py3.13/examples/000077500000000000000000000000001455552142400220735ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/examples/__init__.py000066400000000000000000000000001455552142400241720ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/examples/benchmarks.py000066400000000000000000000131321455552142400245620ustar00rootroot00000000000000# # Simple benchmarks for the processing package # import time, sys, multiprocess as processing, threading, queue as Queue, gc processing.freezeSupport = processing.freeze_support if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in range(iterations): q.put(a) # q.putMany((a for i in range(iterations)) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print(iterations, 'objects passed through the queue in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 cond.acquire() cond.notify() cond.release() for i in range(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = processing.Pipe() cond = processing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = processing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print(iterations, 'objects passed through connection in',elapsed,'seconds') print('average number/sec:', 
iterations/elapsed) #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): a = seq[5] elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): l.acquire() l.release() elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in range(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in range(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print(iterations * 2, 'waits in', elapsed, 'seconds') print('average number/sec:', iterations * 2 / elapsed) #### def test(): manager = processing.Manager() gc.disable() print('\n\t######## testing Queue.Queue\n') test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print('\n\t######## testing processing.Queue\n') test_queuespeed(processing.Process, processing.Queue(), processing.Condition()) print('\n\t######## testing Queue managed by server process\n') test_queuespeed(processing.Process, manager.Queue(), manager.Condition()) print('\n\t######## testing processing.Pipe\n') test_pipespeed() print print('\n\t######## testing list\n') test_seqspeed(range(10)) print('\n\t######## testing list managed by server process\n') test_seqspeed(manager.list(range(10))) print('\n\t######## testing Array("i", ..., lock=False)\n') test_seqspeed(processing.Array('i', range(10), lock=False)) print('\n\t######## testing Array("i", ..., lock=True)\n') test_seqspeed(processing.Array('i', range(10), lock=True)) print() print('\n\t######## testing threading.Lock\n') test_lockspeed(threading.Lock()) print('\n\t######## testing threading.RLock\n') test_lockspeed(threading.RLock()) print('\n\t######## testing processing.Lock\n') test_lockspeed(processing.Lock()) print('\n\t######## testing processing.RLock\n') test_lockspeed(processing.RLock()) print('\n\t######## testing lock managed by server process\n') test_lockspeed(manager.Lock()) print('\n\t######## testing rlock managed by server process\n') test_lockspeed(manager.RLock()) print() print('\n\t######## testing threading.Condition\n') test_conditionspeed(threading.Thread, threading.Condition()) print('\n\t######## testing processing.Condition\n') test_conditionspeed(processing.Process, processing.Condition()) print('\n\t######## testing condition managed by a server process\n') test_conditionspeed(processing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': processing.freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.13/examples/ex_newtype.py000066400000000000000000000030731455552142400246370ustar00rootroot00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. 
# from multiprocess import freeze_support as freezeSupport from multiprocess.managers import BaseManager, IteratorProxy as BaseProxy ## class Foo(object): def f(self): print('you called Foo.f()') def g(self): print('you called Foo.g()') def _h(self): print('you called Foo._h()') # A simple generator function def baz(): for i in range(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): def __iter__(self): return self def __next__(self): return self._callmethod('__next__') ## class MyManager(BaseManager): pass # register the Foo class; make all public methods accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make only `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) ## def test(): manager = MyManager() manager.start() print('-' * 20) f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') print('-' * 20) f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') print('-' * 20) it = manager.baz() for i in it: print('<%d>' % i, end=' ') print() ## if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.13/examples/ex_pool.py000066400000000000000000000155061455552142400241210ustar00rootroot00000000000000# # A test of `processing.Pool` class # from multiprocess import Pool, TimeoutError from multiprocess import cpu_count as cpuCount, current_process as currentProcess, freeze_support as freezeSupport, active_children as activeChildren import time, random, sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print('cpuCount() = %d\n' % cpuCount()) # # Create pool # PROCESSES = 4 print('Creating pool with %d processes\n' % PROCESSES) pool = Pool(PROCESSES) # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print('Ordered results using pool.apply_async():') for r in results: print('\t', r.get()) print() print('Ordered results using pool.imap():') for x in imap_it: print('\t', x) print() print('Unordered results using pool.imap_unordered():') for x in imap_unordered_it: print('\t', x) print() print('Ordered results using pool.map() --- will block till complete:') for x in pool.map(calculatestar, TASKS): print('\t', x) print() # # Simple benchmarks # N = 100000 print('def pow3(x): return x**3') t = time.time() A = list(map(pow3, range(N))) print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() B = pool.map(pow3, range(N)) print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() C = list(pool.imap(pow3, range(N), chunksize=N//8)) print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() L = [None] * 1000000 print('def noop(x): pass') print('L = [None] * 1000000') t 
= time.time() A = list(map(noop, L)) print('\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() B = pool.map(noop, L) print('\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() del A, B, C, L # # Test error handling # print('Testing error handling:') try: print(pool.apply(f, (5,))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.apply()') else: raise AssertionError('expected ZeroDivisionError') try: print(pool.map(f, range(10))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.map()') else: raise AssertionError('expected ZeroDivisionError') try: print(list(pool.imap(f, range(10)))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from list(pool.imap())') else: raise AssertionError('expected ZeroDivisionError') it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError('expected ZeroDivisionError') assert i == 9 print('\tGot ZeroDivisionError as expected from IMapIterator.next()') print() # # Testing timeouts # print('Testing ApplyResult.get() with timeout:', end='') res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except TimeoutError: sys.stdout.write('.') print() print() print('Testing IMapIterator.next() with timeout:', end='') it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except TimeoutError: sys.stdout.write('.') print() print() # # Testing callback # print('Testing callback:') A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print('\tcallbacks succeeded\n') else: print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print('Testing close():') for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print('\tclose() succeeded\n') # # Check terminate() method # print('Testing terminate():') pool = Pool(2) ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print('\tterminate() succeeded\n') # # Check garbage collection # print('Testing garbage collection:') pool = Pool(2) processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] del results, pool time.sleep(0.2) for worker in processes: assert not worker.is_alive() print('\tgarbage collection succeeded\n') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.13/examples/ex_synchronize.py000066400000000000000000000144041455552142400255170ustar00rootroot00000000000000# # A test file for the `processing` package # import time, sys, random from queue import Empty import multiprocess as processing # may 
get overwritten processing.currentProcess = processing.current_process processing.freezeSupport = processing.freeze_support processing.activeChildren = processing.active_children #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print('\n\t\t\t' + str(processing.currentProcess()) + ' has finished') running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = processing.Value('i', TASKS) mutex = processing.Lock() for i in range(TASKS): processing.Process(target=value_func, args=(running, mutex)).start() while running.value > 0: time.sleep(0.08) mutex.acquire() print(running.value, end=' ') sys.stdout.flush() mutex.release() print() print('No more running processes') #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = processing.Queue() p = processing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print(o, end=' ') sys.stdout.flush() except Empty: print('TIMEOUT') print() #### TEST_CONDITION def condition_func(cond): cond.acquire() print('\t' + str(cond)) time.sleep(2) print('\tchild is notifying') print('\t' + str(cond)) cond.notify() cond.release() def test_condition(): cond = processing.Condition() p = processing.Process(target=condition_func, args=(cond,)) print(cond) cond.acquire() print(cond) cond.acquire() print(cond) p.start() print('main is waiting') cond.wait() print('main has woken up') print(cond) cond.release() print(cond) cond.release() p.join() print(cond) #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print(running.value, 'tasks are running') mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print('%s has finished' % processing.currentProcess()) mutex.release() sema.release() def test_semaphore(): sema = processing.Semaphore(3) mutex = processing.RLock() running = processing.Value('i', 0) processes = [ processing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print('\tchild sleeping') time.sleep(5.5) print('\n\tchild terminating') def test_join_timeout(): p = processing.Process(target=join_timeout_func) p.start() print('waiting for process to finish') while 1: p.join(timeout=1) if not p.is_alive(): break print('.', end=' ') sys.stdout.flush() #### TEST_EVENT def event_func(event): print('\t%r is waiting' % processing.currentProcess()) event.wait() print('\t%r has woken up' % processing.currentProcess()) def test_event(): event = processing.Event() processes = [processing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print('main is sleeping') time.sleep(2) print('main is setting event') event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert list(a) == sa print('Tests passed') def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [processing.Value(id, v) for id, v in values] shared_arrays = 
[processing.Array(id, a) for id, a in arrays] p = processing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print('\n\t######## %s\n' % func.__name__) func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print(info) raise ValueError('there should be no positive refcounts left') if __name__ == '__main__': processing.freezeSupport() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print(' Using processes '.center(79, '-')) namespace = processing elif sys.argv[1] == 'manager': print(' Using processes and a manager '.center(79, '-')) namespace = processing.Manager() namespace.Process = processing.Process namespace.currentProcess = processing.currentProcess namespace.activeChildren = processing.activeChildren elif sys.argv[1] == 'threads': print(' Using threads '.center(79, '-')) import processing.dummy as namespace else: print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]) raise SystemExit(2) test(namespace) uqfoundation-multiprocess-b3457a5/py3.13/examples/ex_webserver.py000066400000000000000000000041001455552142400251400ustar00rootroot00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `processing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. 
# import os import sys from multiprocess import Process, current_process as currentProcess, freeze_support as freezeSupport from http.server import HTTPServer from http.server import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocess.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (currentProcess()._name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print('Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)) print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']) os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.13/examples/ex_workers.py000066400000000000000000000042241455552142400246370ustar00rootroot00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). 
# import time import random from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport from multiprocess import Queue # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks list(map(task_queue.put, TASKS1)) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print('Unordered results:') for i in range(len(TASKS1)): print('\t', done_queue.get()) # Add more tasks using `put()` instead of `putMany()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print('\t', done_queue.get()) # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.13/index.html000066400000000000000000000117511455552142400222570ustar00rootroot00000000000000 Python processing

Python processing

Author: R Oudkerk
Contact: roudkerk at users.berlios.de
Url: http://developer.berlios.de/projects/pyprocessing
Version: 0.52
Licence: BSD Licence

processing is a package for the Python language which supports the spawning of processes using the API of the standard library's threading module. It runs on both Unix and Windows.

Features:

  • Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
  • Objects can be shared between processes using a server process or (for simple data) shared memory.
  • Equivalents of all the synchronization primitives in threading are available.
  • A Pool class makes it easy to submit tasks to a pool of worker processes.

Examples

The processing.Process class follows the API of threading.Thread. For example

from processing import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=[q])
    p.start()
    print q.get()
    p.join()
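
For comparison, a minimal sketch of the same example written against the multiprocess package maintained in this repository (Python 3 syntax; only the package name differs from the code above, and the __main__ guard is kept for spawn-based platforms such as Windows):

from multiprocess import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print(q.get())      # prints 'hello world'
    p.join()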

Synchronization primitives like locks, semaphores and conditions are available, for example

>>> from processing import Condition
>>> c = Condition()
>>> print c
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print c
<Condition(<RLock(MainProcess, 1)>), 0>
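
The session above only inspects the Condition object's repr. A minimal sketch of an actual wait/notify exchange between two processes, assuming the modern multiprocess API (Condition mirrors threading.Condition, so the lock must be held around wait() and notify()):

from multiprocess import Process, Condition

def child(cond):
    with cond:          # blocks until the parent releases the lock inside wait()
        cond.notify()   # wake the waiting parent

if __name__ == '__main__':
    cond = Condition()
    with cond:
        p = Process(target=child, args=(cond,))
        p.start()
        cond.wait()     # releases the lock while waiting, reacquires on wakeup
    p.join()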

One can also use a manager to create shared objects either in shared memory or in a server process, for example

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print l
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print repr(l)
<Proxy[list] object at 0x00E1B3B0>
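
The equivalent with the modern multiprocess API is essentially unchanged; a minimal sketch (the context-manager form shuts the manager's server process down on exit):

from multiprocess import Manager

if __name__ == '__main__':
    with Manager() as manager:
        l = manager.list(range(10))
        l.reverse()
        print(l)        # str() of a proxy shows the referent: [9, 8, ..., 0]
        print(repr(l))  # repr() shows the proxy object itself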

Tasks can be offloaded to a pool of worker processes in various ways, for example

>>> from processing import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.mapAsync(f, range(10))
>>> print result.get(timeout=1)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
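
mapAsync above is the old camelCase spelling; in the modern multiprocess package the method is map_async. A minimal sketch of the equivalent call (and, because multiprocess serializes with dill, the mapped function may also be one defined interactively):

from multiprocess import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    with Pool(4) as p:
        result = p.map_async(f, range(10))
        print(result.get(timeout=1))    # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]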
uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/000077500000000000000000000000001455552142400230065ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/__init__.py000066400000000000000000000035001455552142400251150ustar00rootroot00000000000000# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Original: Copyright (c) 2006-2008, R Oudkerk # Original: Licensed to PSF under a Contributor Agreement. # Forked by Mike McKerns, to support enhanced serialization. # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) sys.path.append(root) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(root, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) del os, sys, root, get_license_text, get_readme_as_rst import sys from . import context # # Copy stuff from default context # __all__ = [x for x in dir(context._default_context) if not x.startswith('_')] globals().update((name, getattr(context._default_context, name)) for name in __all__) # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def license(): """print license""" print (__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/connection.py000066400000000000000000001211511455552142400255200ustar00rootroot00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] import errno import io import os import sys import socket import struct import time import tempfile import itertools try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import util from . import AuthenticationError, BufferTooShort from .context import reduction _ForkingPickler = reduction.ForkingPickler try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE except ImportError: if sys.platform == 'win32': raise _winapi = None # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... CONNECTION_TIMEOUT = 20. 
_mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return getattr(time,'monotonic',time.time)() + timeout def _check_timeout(t): return getattr(time,'monotonic',time.time)() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter)), dir="") else: raise ValueError('unrecognized family') def _validate_family(family): ''' Checks if the family is valid for the current environment. ''' if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) if sys.platform == 'win32' and family == 'AF_UNIX': # double check if not hasattr(socket, family): raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str or util.is_abstract_socket_namespace(address): return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _ConnectionBase: _handle = None def __init__(self, handle, readable=True, writable=True): handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? 
def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) if m.itemsize > 1: m = m.cast('B') n = m.nbytes if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(_ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. """ self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[offset // itemsize : (offset + size) // itemsize]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return _ForkingPickler.loads(buf.getbuffer()) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. 
""" _got_empty_message = False _send_ov = None def _close(self, _CloseHandle=_winapi.CloseHandle): ov = self._send_ov if ov is not None: # Interrupt WaitForMultipleObjects() in _send_bytes() ov.cancel() _CloseHandle(self._handle) def _send_bytes(self, buf): if self._send_ov is not None: # A connection should only be used by a single thread raise ValueError("concurrent send_bytes() calls " "are not supported") ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) self._send_ov = ov try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: self._send_ov = None nwritten, err = ov.GetOverlappedResult(True) if err == _winapi.ERROR_OPERATION_ABORTED: # close() was called by another thread while # WaitForMultipleObjects() was waiting for the overlapped # operation. raise OSError(errno.EPIPE, "handle is closed") assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile(self._handle, bsize, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). 
""" if _winapi: def _close(self, _close=_multiprocessing.closesocket): _close(self._handle) _write = _multiprocessing.send _read = _multiprocessing.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: n = write(self._handle, buf) remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: chunk = read(handle, remaining) n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf): n = len(buf) if n > 0x7fffffff: pre_header = struct.pack("!i", -1) header = struct.pack("!Q", n) self._send(pre_header) self._send(header) self._send(buf) else: # For wire compatibility with 3.7 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if size == -1: buf = self._recv(8) size, = struct.unpack("!Q", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. 
''' listener = self._listener if listener is not None: self._listener = None listener.close() @property def address(self): return self._listener._address @property def last_accepted(self): return self._listener._last_accepted def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = Connection(s1.detach()) c2 = Connection(s2.detach()) else: fd1, fd2 = os.pipe() c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). 
if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): # Linux abstract socket namespaces do not need to be explicitly unlinked self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() s.setblocking(True) return Connection(s.detach()) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) with socket.socket( getattr(socket, family) ) as s: s.setblocking(True) s.connect(address) return Connection(s.detach()) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: res = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 40 # MUST be > 20 MESSAGE_MAXLEN = 256 # default is None _CHALLENGE = b'#CHALLENGE#' _WELCOME = b'#WELCOME#' _FAILURE = b'#FAILURE#' # multiprocessing.connection Authentication Handshake Protocol Description # (as documented for reference after reading the existing code) # ============================================================================= # # On Windows: native pipes with "overlapped IO" are used to send the bytes, # instead of the length prefix SIZE scheme described below. (ie: the OS deals # with message sizes for us) # # Protocol error behaviors: # # On POSIX, any failure to receive the length prefix into SIZE, for SIZE greater # than the requested maxsize to receive, or receiving fewer than SIZE bytes # results in the connection being closed and auth to fail. # # On Windows, receiving too few bytes is never a low level _recv_bytes read # error, receiving too many will trigger an error only if receive maxsize # value was larger than 128 OR the if the data arrived in smaller pieces. # # Serving side Client side # ------------------------------ --------------------------------------- # 0. Open a connection on the pipe. # 1. Accept connection. # 2. Random 20+ bytes -> MESSAGE # Modern servers always send # more than 20 bytes and include # a {digest} prefix on it with # their preferred HMAC digest. # Legacy ones send ==20 bytes. # 3. send 4 byte length (net order) # prefix followed by: # b'#CHALLENGE#' + MESSAGE # 4. Receive 4 bytes, parse as network byte # order integer. If it is -1, receive an # additional 8 bytes, parse that as network # byte order. The result is the length of # the data that follows -> SIZE. # 5. Receive min(SIZE, 256) bytes -> M1 # 6. Assert that M1 starts with: # b'#CHALLENGE#' # 7. Strip that prefix from M1 into -> M2 # 7.1. Parse M2: if it is exactly 20 bytes in # length this indicates a legacy server # supporting only HMAC-MD5. Otherwise the # 7.2. preferred digest is looked up from an # expected "{digest}" prefix on M2. No prefix # or unsupported digest? <- AuthenticationError # 7.3. Put divined algorithm name in -> D_NAME # 8. Compute HMAC-D_NAME of AUTHKEY, M2 -> C_DIGEST # 9. Send 4 byte length prefix (net order) # followed by C_DIGEST bytes. # 10. Receive 4 or 4+8 byte length # prefix (#4 dance) -> SIZE. # 11. Receive min(SIZE, 256) -> C_D. # 11.1. Parse C_D: legacy servers # accept it as is, "md5" -> D_NAME # 11.2. modern servers check the length # of C_D, IF it is 16 bytes? # 11.2.1. "md5" -> D_NAME # and skip to step 12. # 11.3. longer? 
expect and parse a "{digest}" # prefix into -> D_NAME. # Strip the prefix and store remaining # bytes in -> C_D. # 11.4. Don't like D_NAME? <- AuthenticationError # 12. Compute HMAC-D_NAME of AUTHKEY, # MESSAGE into -> M_DIGEST. # 13. Compare M_DIGEST == C_D: # 14a: Match? Send length prefix & # b'#WELCOME#' # <- RETURN # 14b: Mismatch? Send len prefix & # b'#FAILURE#' # <- CLOSE & AuthenticationError # 15. Receive 4 or 4+8 byte length prefix (net # order) again as in #4 into -> SIZE. # 16. Receive min(SIZE, 256) bytes -> M3. # 17. Compare M3 == b'#WELCOME#': # 17a. Match? <- RETURN # 17b. Mismatch? <- CLOSE & AuthenticationError # # If this RETURNed, the connection remains open: it has been authenticated. # # Length prefixes are used consistently. Even on the legacy protocol, this # was good fortune and allowed us to evolve the protocol by using the length # of the opening challenge or length of the returned digest as a signal as # to which protocol the other end supports. _ALLOWED_DIGESTS = frozenset( {b'md5', b'sha256', b'sha384', b'sha3_256', b'sha3_384'}) _MAX_DIGEST_LEN = max(len(_) for _ in _ALLOWED_DIGESTS) # Old hmac-md5 only server versions from Python <=3.11 sent a message of this # length. It happens to not match the length of any supported digest so we can # use a message of this length to indicate that we should work in backwards # compatible md5-only mode without a {digest_name} prefix on our response. _MD5ONLY_MESSAGE_LENGTH = 20 _MD5_DIGEST_LEN = 16 _LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN) def _get_digest_name_and_payload(message: bytes) -> (str, bytes): """Returns a digest name and the payload for a response hash. If a legacy protocol is detected based on the message length or contents the digest name returned will be empty to indicate legacy mode where MD5 and no digest prefix should be sent. """ # modern message format: b"{digest}payload" longer than 20 bytes # legacy message format: 16 or 20 byte b"payload" if len(message) in _LEGACY_LENGTHS: # Either this was a legacy server challenge, or we're processing # a reply from a legacy client that sent an unprefixed 16-byte # HMAC-MD5 response. All messages using the modern protocol will # be longer than either of these lengths. return '', message if (message.startswith(b'{') and (curly := message.find(b'}', 1, _MAX_DIGEST_LEN+2)) > 0): digest = message[1:curly] if digest in _ALLOWED_DIGESTS: payload = message[curly+1:] return digest.decode('ascii'), payload raise AuthenticationError( 'unsupported message length, missing digest prefix, ' f'or unsupported digest: {message=}') def _create_response(authkey, message): """Create a MAC based on authkey and message The MAC algorithm defaults to HMAC-MD5, unless MD5 is not available or the message has a '{digest_name}' prefix. For legacy HMAC-MD5, the response is the raw MAC, otherwise the response is prefixed with '{digest_name}', e.g. b'{sha256}abcdefg...' Note: The MAC protects the entire message including the digest_name prefix. """ import hmac digest_name = _get_digest_name_and_payload(message)[0] # The MAC protects the entire message: digest header and payload. if not digest_name: # Legacy server without a {digest} prefix on message. # Generate a legacy non-prefixed HMAC-MD5 reply. try: return hmac.new(authkey, message, 'md5').digest() except ValueError: # HMAC-MD5 is not available (FIPS mode?), fall back to # HMAC-SHA2-256 modern protocol. The legacy server probably # doesn't support it and will reject us anyways. 
:shrug: digest_name = 'sha256' # Modern protocol, indicate the digest used in the reply. response = hmac.new(authkey, message, digest_name).digest() return b'{%s}%s' % (digest_name.encode('ascii'), response) def _verify_challenge(authkey, message, response): """Verify MAC challenge If our message did not include a digest_name prefix, the client is allowed to select a stronger digest_name from _ALLOWED_DIGESTS. In case our message is prefixed, a client cannot downgrade to a weaker algorithm, because the MAC is calculated over the entire message including the '{digest_name}' prefix. """ import hmac response_digest, response_mac = _get_digest_name_and_payload(response) response_digest = response_digest or 'md5' try: expected = hmac.new(authkey, message, response_digest).digest() except ValueError: raise AuthenticationError(f'{response_digest=} unsupported') if len(expected) != len(response_mac): raise AuthenticationError( f'expected {response_digest!r} of length {len(expected)} ' f'got {len(response_mac)}') if not hmac.compare_digest(expected, response_mac): raise AuthenticationError('digest received was wrong') def deliver_challenge(connection, authkey: bytes, digest_name='sha256'): if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) assert MESSAGE_LENGTH > _MD5ONLY_MESSAGE_LENGTH, "protocol constraint" message = os.urandom(MESSAGE_LENGTH) message = b'{%s}%s' % (digest_name.encode('ascii'), message) # Even when sending a challenge to a legacy client that does not support # digest prefixes, they'll take the entire thing as a challenge and # respond to it with a raw HMAC-MD5. connection.send_bytes(_CHALLENGE + message) response = connection.recv_bytes(MESSAGE_MAXLEN) # reject large message try: _verify_challenge(authkey, message, response) except AuthenticationError: connection.send_bytes(_FAILURE) raise else: connection.send_bytes(_WELCOME) def answer_challenge(connection, authkey: bytes): if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = connection.recv_bytes(MESSAGE_MAXLEN) # reject large message if not message.startswith(_CHALLENGE): raise AuthenticationError( f'Protocol error, expected challenge: {message=}') message = message[len(_CHALLENGE):] if len(message) < _MD5ONLY_MESSAGE_LENGTH: raise AuthenticationError('challenge too short: {len(message)} bytes') digest = _create_response(authkey, message) connection.send_bytes(digest) response = connection.recv_bytes(MESSAGE_MAXLEN) # reject large message if response != _WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib return 
ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signalled. (Only # returning the first signalled might create starvation issues.) L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res+1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: ov, err = None, e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed... if ov and sys.getwindowsversion()[:2] >= (6, 2): # ... except on Windows 8 and later, where # the message HAS been consumed. try: _, err = ov.GetOverlappedResult(False) except OSError as e: err = e.winerror if not err and hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [o for o in object_list if o in ready_objects] else: import selectors # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). if hasattr(selectors, 'PollSelector'): _WaitSelector = selectors.PollSelector else: _WaitSelector = selectors.SelectSelector def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. 
''' with _WaitSelector() as selector: for obj in object_list: selector.register(obj, selectors.EVENT_READ) if timeout is not None: deadline = getattr(time,'monotonic',time.time)() + timeout while True: ready = selector.select(timeout) if ready: return [key.fileobj for (key, events) in ready] else: if timeout is not None: timeout = deadline - getattr(time,'monotonic',time.time)() if timeout < 0: return ready # # Make connection and socket objects shareable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(sock.detach(), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): handle = dh.detach() return PipeConnection(handle, readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): fd = df.detach() return Connection(fd, readable, writable) reduction.register(Connection, reduce_connection) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/context.py000066400000000000000000000266461455552142400250620ustar00rootroot00000000000000import os import sys import threading from . import process from . import reduction __all__ = () # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError current_process = staticmethod(process.current_process) parent_process = staticmethod(process.parent_process) active_children = staticmethod(process.active_children) def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, lock=True): '''Returns a synchronized shared object''' from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *, lock=True): '''Returns a synchronized shared array''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . 
import connection def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. ''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) from None ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property def reducer(self): '''Controls how objects will be reduced to a form that can be shared with other processes.''' return globals().get('reduction') @reducer.setter def reducer(self, reduction): globals()['reduction'] = reduction def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) @staticmethod def _after_fork(): return _default_context.get_context().Process._after_fork() class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super().get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): """Returns a list of the supported start methods, default first.""" if sys.platform == 'win32': return ['spawn'] else: methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] if reduction.HAVE_SEND_HANDLE: methods.append('forkserver') return methods # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): if not 
reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } if sys.platform == 'darwin': # bpo-33725: running arbitrary code after fork() is no longer reliable # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn else: _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/dummy/000077500000000000000000000000001455552142400241415ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/dummy/__init__.py000066400000000000000000000057651455552142400262670ustar00rootroot00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
# __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): if self._parent is not current_process(): raise RuntimeError( "Parent is {0!r} but current_process is {1!r}".format( self._parent, current_process())) self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/dummy/connection.py000066400000000000000000000030761455552142400266600ustar00rootroot00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
# __all__ = [ 'Client', 'Listener', 'Pipe' ] from queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None @property def address(self): return self._backlog_queue def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False with self._in.not_empty: self._in.not_empty.wait(timeout) return self._in.qsize() > 0 def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/forkserver.py000066400000000000000000000275421455552142400255620ustar00rootroot00000000000000import errno import os import selectors import signal import socket import struct import sys import threading import warnings from . import connection from . import process from .context import reduction from . import resource_tracker from . import spawn from . import util __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def _stop(self): # Method used by unit tests to stop the server with self._lock: self._stop_unlocked() def _stop_unlocked(self): if self._forkserver_pid is None: return # close the "alive" file descriptor asks the server to stop os.close(self._forkserver_alive_fd) self._forkserver_alive_fd = None os.waitpid(self._forkserver_pid, 0) self._forkserver_pid = None if not util.is_abstract_socket_namespace(self._forkserver_address): os.unlink(self._forkserver_address) self._forkserver_address = None def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in modules_names): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. 
''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, resource_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. ''' with self._lock: resource_tracker.ensure_running() if self._forkserver_pid is not None: # forkserver was launched before, is it still running? pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) if not pid: # still alive return # dead, launch it again os.close(self._forkserver_alive_fd) self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None cmd = ('from multiprocess.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = {x: y for x, y in data.items() if x in desired_keys} else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) if not util.is_abstract_socket_namespace(address): os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] pid = util.spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w self._forkserver_pid = pid # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass util._close_stdin() sig_r, sig_w = os.pipe() os.set_blocking(sig_r, False) os.set_blocking(sig_w, False) def sigchld_handler(*_unused): # Dummy signal handler, doesn't do anything pass handlers = { # unblocking SIGCHLD allows the wakeup fd to notify our event loop signal.SIGCHLD: sigchld_handler, # protect the process from ^C signal.SIGINT: signal.SIG_IGN, } old_handlers = {sig: signal.signal(sig, val) for (sig, val) in handlers.items()} # calling os.write() in the Python signal handler is racy signal.set_wakeup_fd(sig_w) # map child pids to client fds pid_to_fd = {} with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) selector.register(sig_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: 
# EOF because no more client processes left assert os.read(alive_r, 1) == b'', "Not at EOF?" raise SystemExit if sig_r in rfds: # Got SIGCHLD os.read(sig_r, 65536) # exhaust while True: # Scan for child processes try: pid, sts = os.waitpid(-1, os.WNOHANG) except ChildProcessError: break if pid == 0: break child_w = pid_to_fd.pop(pid, None) if child_w is not None: returncode = os.waitstatus_to_exitcode(sts) # Send exit code to client process try: write_signed(child_w, returncode) except BrokenPipeError: # client vanished pass os.close(child_w) else: # This shouldn't happen really warnings.warn('forkserver: waitpid returned ' 'unexpected pid %d' % pid) if listener in rfds: # Incoming fork request with listener.accept()[0] as s: # Receive fds from client fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) if len(fds) > MAXFDS_TO_SEND: raise RuntimeError( "Too many ({0:n}) fds to send".format( len(fds))) child_r, child_w, *fds = fds s.close() pid = os.fork() if pid == 0: # Child code = 1 try: listener.close() selector.close() unused_fds = [alive_r, child_w, sig_r, sig_w] unused_fds.extend(pid_to_fd.values()) code = _serve_one(child_r, fds, unused_fds, old_handlers) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) else: # Send pid to client process try: write_signed(child_w, pid) except BrokenPipeError: # client vanished pass pid_to_fd[pid] = child_w os.close(child_r) for fd in fds: os.close(fd) except OSError as e: if e.errno != errno.ECONNABORTED: raise def _serve_one(child_r, fds, unused_fds, handlers): # close unnecessary stuff and reset signal handlers signal.set_wakeup_fd(-1) for sig, val in handlers.items(): signal.signal(sig, val) for fd in unused_fds: os.close(fd) (_forkserver._forkserver_alive_fd, resource_tracker._resource_tracker._fd, *_forkserver._inherited_fds) = fds # Run process object received over pipe parent_sentinel = os.dup(child_r) code = spawn._main(child_r, parent_sentinel) return code # # Read and write signed numbers # def read_signed(fd): data = b'' length = SIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return SIGNED_STRUCT.unpack(data)[0] def write_signed(fd, n): msg = SIGNED_STRUCT.pack(n) while msg: nbytes = os.write(fd, msg) if nbytes == 0: raise RuntimeError('should not get here') msg = msg[nbytes:] # # # _forkserver = ForkServer() ensure_running = _forkserver.ensure_running get_inherited_fds = _forkserver.get_inherited_fds connect_to_new_process = _forkserver.connect_to_new_process set_forkserver_preload = _forkserver.set_forkserver_preload uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/heap.py000066400000000000000000000265521455552142400243070ustar00rootroot00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import bisect from collections import defaultdict import mmap import os import sys import tempfile import threading from .context import reduction, assert_spawning from . import util __all__ = ['BufferWrapper'] # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': import _winapi class Arena(object): """ A shared memory area backed by anonymous memory (Windows). 
""" _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if _winapi.GetLastError() == 0: break # We have reopened a preexisting mmap. buf.close() else: raise FileExistsError('Cannot find name for new mmap') self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state # Reopen existing mmap self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. See issue 23060 #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS else: class Arena(object): """ A shared memory area backed by a temporary file (POSIX). """ if sys.platform == 'linux': _dir_candidates = ['/dev/shm'] else: _dir_candidates = [] def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: # Arena is created anew (if fd != -1, it means we're coming # from rebuild_arena() below) self.fd, name = tempfile.mkstemp( prefix='pym-%d-'%os.getpid(), dir=self._choose_dir(size)) os.unlink(name) util.Finalize(self, os.close, (self.fd,)) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def _choose_dir(self, size): # Choose a non-storage backed directory if possible, # to improve performance for d in self._dir_candidates: st = os.statvfs(d) if st.f_bavail * st.f_frsize >= size: # enough free space? return d return util.get_temp_dir() def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because ' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): # Minimum malloc() alignment _alignment = 8 _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() # Current arena allocation size self._size = size # A sorted list of available block sizes in arenas self._lengths = [] # Free block management: # - map each block size to a list of `(Arena, start, stop)` blocks self._len_to_seq = {} # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block # starting at that offset self._start_to_block = {} # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block # ending at that offset self._stop_to_block = {} # Map arenas to their `(Arena, start, stop)` blocks in use self._allocated_blocks = defaultdict(set) self._arenas = [] # List of pending blocks to free - see comment in free() below self._pending_free_blocks = [] # Statistics self._n_mallocs = 0 self._n_frees = 0 @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _new_arena(self, size): # Create a new arena with at least the given *size* length = self._roundup(max(self._size, size), mmap.PAGESIZE) # We carve larger and larger arenas, for efficiency, until we # reach a large-ish size (roughly L3 cache-sized) if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) def 
_discard_arena(self, arena): # Possibly delete the given (unused) arena length = arena.size # Reusing an existing arena is faster than creating a new one, so # we only reclaim space if it's large enough. if length < self._DISCARD_FREE_SPACE_LARGER_THAN: return blocks = self._allocated_blocks.pop(arena) assert not blocks del self._start_to_block[(arena, 0)] del self._stop_to_block[(arena, length)] self._arenas.remove(arena) seq = self._len_to_seq[length] seq.remove((arena, 0, length)) if not seq: del self._len_to_seq[length] self._lengths.remove(length) def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): return self._new_arena(size) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _add_free_block(self, block): # make block available and try to merge with its neighbours in the arena (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _remove_allocated_block(self, block): arena, start, stop = block blocks = self._allocated_blocks[arena] blocks.remove((start, stop)) if not blocks: # Arena is entirely free, discard it from this process self._discard_arena(arena) def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held. while True: try: block = self._pending_free_blocks.pop() except IndexError: break self._add_free_block(block) self._remove_allocated_block(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
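        # In short: a free() call that cannot take the lock only appends the
        # block to self._pending_free_blocks; a later malloc()/free() that does
        # hold the lock drains that list via _free_pending_blocks(), so every
        # block is eventually returned to its arena's free lists.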
if os.getpid() != self._lastpid: raise ValueError( "My pid ({0:n}) is not last pid {1:n}".format( os.getpid(),self._lastpid)) if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._n_frees += 1 self._free_pending_blocks() self._add_free_block(block) self._remove_allocated_block(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._n_mallocs += 1 # allow pending blocks to be marked available self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) real_stop = start + size if real_stop < stop: # if the returned block is larger than necessary, mark # the remainder available self._add_free_block((arena, real_stop, stop)) self._allocated_blocks[arena].add((start, real_stop)) return (arena, start, real_stop) # # Class wrapping a block allocated out of a Heap -- can be inherited by child process # class BufferWrapper(object): _heap = Heap() def __init__(self, size): if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start+size] uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/managers.py000066400000000000000000001355621455552142400251710ustar00rootroot00000000000000# # Module providing manager classes for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] # # Imports # import sys import threading import signal import array import queue import time import types import os from os import getpid from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen, ProcessError from . import pool from . import process from . import util from . import get_context try: from . 
import shared_memory except ImportError: HAS_SHMEM = False else: HAS_SHMEM = True __all__.append('SharedMemoryManager') # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) del view_type, view_types # # Type for identifying shared objects # class Token(object): ''' Type to uniquely identify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result try: raise convert_to_error(kind, result) finally: del result # break reference cycle def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): if not isinstance(result, str): raise TypeError( "Result {0!r} (kind '{1}') type is {2}, not str".format( result, kind, type(result))) if kind == '#UNSERIALIZABLE': return RemoteError('Unserializable message: %s\n' % result) else: return RemoteError(result) else: return ValueError('Unrecognized message type {!r}'.format(kind)) class RemoteError(Exception): def __str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): if not isinstance(authkey, bytes): raise TypeError( "Authkey {0!r} is type {1!s}, not bytes".format( authkey, type(authkey))) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=128) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): 
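                # fall through to the finally block below, which restores
                # stdout/stderr if they were redirected and exits the server
                # process with status 0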
pass finally: if sys.stdout != sys.__stdout__: # what about stderr? util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def _handle_request(self, c): request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) def handle_request(self, conn): ''' Handle a new connection ''' try: self._handle_request(conn) except SystemExit: # Server.serve_client() calls sys.exit(0) on EOF pass finally: conn.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = \ self.id_to_local_proxy_obj[ident] except KeyError: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' # Perhaps include debug info about 'c'? 
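        # The returned string has one entry per tracked object (ident '0' is
        # skipped): the ident, its current refcount, and a repr of the referent
        # truncated to 75 characters.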
with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' return len(self.id_to_refcount) def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(self, c, typeid, /, *args, **kwds): ''' Create a new shared object and return its id ''' with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: if kwds or (len(args) != 1): raise ValueError( "Without callable, must have one non-keyword argument") obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: if not isinstance(method_to_typeid, dict): raise TypeError( "Method_to_typeid {0!r}: type {1!s}, not dict".format( method_to_typeid, type(method_to_typeid))) exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: # If no external references exist but an internal (to the # manager) still does and a new external reference is created # from it, restore the manager's tracking of it from the # previously stashed internal ref. if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = \ self.id_to_local_proxy_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if ident not in self.id_to_refcount and \ ident in self.id_to_local_proxy_obj: util.debug('Server DECREF skipping %r', ident) return with self.mutex: if self.id_to_refcount[ident] <= 0: raise AssertionError( "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( ident, self.id_to_obj[ident], self.id_to_refcount[ident])) self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: # Two-step process in case the object turns out to contain other # proxy objects (e.g. a managed list of managed lists). # Otherwise, deleting self.id_to_obj[ident] would trigger the # deleting of the stored value (another managed object) which would # in turn attempt to acquire the mutex that is already held here. 
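            # Step 1 (performed without holding self.mutex): replace the entry
            # with an inert placeholder, so releasing the real value -- and any
            # proxies nested inside it -- does not need the mutex held here.
            # Step 2 (below, with the mutex): delete the placeholder entry.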
self.id_to_obj[ident] = (None, (), None) # thread-safe util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { #XXX: register dill? 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None, *, shutdown_timeout=1.0): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() self._shutdown_timeout = shutdown_timeout def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client, self._shutdown_timeout), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' # bpo-36368: protect server process from KeyboardInterrupt signals signal.signal(signal.SIGINT, signal.SIG_IGN) if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address 
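        # start() in the parent is blocked on reader.recv(); this send is what
        # unblocks it and lets the parent record the final server address.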
writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, /, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() if self._state.value != State.STARTED: if self._state.value == State.INITIAL: raise ProcessError("Unable to start server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client, shutdown_timeout): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=shutdown_timeout) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=shutdown_timeout) if process.is_alive(): util.info('manager still alive after terminate') process.kill() process.join() state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass @property def address(self): return self._address @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): # isinstance? 
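                # Each entry maps a method name of the shared object to a
                # registered typeid; Server.serve_client() then wraps that
                # method's return value in a proxy of the mapped typeid
                # (e.g. DictProxy maps '__iter__' to 'Iterator') instead of
                # copying the result back to the caller.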
assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, /, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] # Should be set to True only when a proxy object is being created # on the manager server; primary use case: nested proxy objects. # RebuildProxy detects when a proxy is being created on the manager # and sets this value appropriately. 
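        # When manager_owned is True the usual incref/decref bookkeeping is
        # skipped (see _incref below); the server keeps such referents alive
        # through its id_to_local_proxy_obj table instead.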
self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy try: raise convert_to_error(kind, result) finally: del result # break reference cycle def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... 
decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = \ server.id_to_obj[token.id] incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, /, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref, manager_owned=manager_owned) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = 
typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): _exposed_ = ('__next__', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self, n=1): return self._callmethod('notify', (n,)) def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) __class_getitem__ = 
classmethod(types.GenericAlias) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self __class_getitem__ = classmethod(types.GenericAlias) _BaseDictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) _BaseDictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } class DictProxy(_BaseDictProxy): __class_getitem__ = classmethod(types.GenericAlias) ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__' )) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocess.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) # # Definition of SharedMemoryManager and SharedMemoryServer # if HAS_SHMEM: class _SharedMemoryTracker: "Manages one or more shared memory segments." def __init__(self, name, segment_names=[]): self.shared_memory_context_name = name self.segment_names = segment_names def register_segment(self, segment_name): "Adds the supplied shared memory block name to tracker." 
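        # Names collected here are unlinked in bulk by unlink(), which runs on
        # server shutdown and again from __del__ as a safety net.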
util.debug(f"Register segment {segment_name!r} in pid {getpid()}") self.segment_names.append(segment_name) def destroy_segment(self, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the list of blocks being tracked.""" util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") self.segment_names.remove(segment_name) segment = shared_memory.SharedMemory(segment_name) segment.close() segment.unlink() def unlink(self): "Calls destroy_segment() on all tracked shared memory blocks." for segment_name in self.segment_names[:]: self.destroy_segment(segment_name) def __del__(self): util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") self.unlink() def __getstate__(self): return (self.shared_memory_context_name, self.segment_names) def __setstate__(self, state): self.__init__(*state) class SharedMemoryServer(Server): public = Server.public + \ ['track_segment', 'release_segment', 'list_segments'] def __init__(self, *args, **kwargs): Server.__init__(self, *args, **kwargs) address = self.address # The address of Linux abstract namespaces can be bytes if isinstance(address, bytes): address = os.fsdecode(address) self.shared_memory_context = \ _SharedMemoryTracker(f"shm_{address}_{getpid()}") util.debug(f"SharedMemoryServer started by pid {getpid()}") def create(self, c, typeid, /, *args, **kwargs): """Create a new distributed-shared object (not backed by a shared memory block) and return its id to be used in a Proxy Object.""" # Unless set up as a shared proxy, don't make shared_memory_context # a standard part of kwargs. This makes things easier for supplying # simple functions. if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): kwargs['shared_memory_context'] = self.shared_memory_context return Server.create(self, c, typeid, *args, **kwargs) def shutdown(self, c): "Call unlink() on all tracked shared memory, terminate the Server." self.shared_memory_context.unlink() return Server.shutdown(self, c) def track_segment(self, c, segment_name): "Adds the supplied shared memory block name to Server's tracker." self.shared_memory_context.register_segment(segment_name) def release_segment(self, c, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the tracker instance inside the Server.""" self.shared_memory_context.destroy_segment(segment_name) def list_segments(self, c): """Returns a list of names of shared memory blocks that the Server is currently tracking.""" return self.shared_memory_context.segment_names class SharedMemoryManager(BaseManager): """Like SyncManager but uses SharedMemoryServer instead of Server. It provides methods for creating and returning SharedMemory instances and for creating a list-like object (ShareableList) backed by shared memory. It also provides methods that create and return Proxy Objects that support synchronization across processes (i.e. multi-process-safe locks and semaphores). """ _Server = SharedMemoryServer def __init__(self, *args, **kwargs): if os.name == "posix": # bpo-36867: Ensure the resource_tracker is running before # launching the manager process, so that concurrent # shared_memory manipulation both in the manager and in the # current process does not create two resource_tracker # processes. from . 
import resource_tracker resource_tracker.ensure_running() BaseManager.__init__(self, *args, **kwargs) util.debug(f"{self.__class__.__name__} created by pid {getpid()}") def __del__(self): util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") def get_server(self): 'Better than monkeypatching for now; merge into Server ultimately' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started SharedMemoryServer") elif self._state.value == State.SHUTDOWN: raise ProcessError("SharedMemoryManager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self._Server(self._registry, self._address, self._authkey, self._serializer) def SharedMemory(self, size): """Returns a new SharedMemory instance with the specified size in bytes, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sms = shared_memory.SharedMemory(None, create=True, size=size) try: dispatch(conn, None, 'track_segment', (sms.name,)) except BaseException as e: sms.unlink() raise e return sms def ShareableList(self, sequence): """Returns a new ShareableList instance populated with the values from the input sequence, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sl = shared_memory.ShareableList(sequence) try: dispatch(conn, None, 'track_segment', (sl.shm.name,)) except BaseException as e: sl.shm.unlink() raise e return sl uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/pool.py000066400000000000000000001000001455552142400243200ustar00rootroot00000000000000# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import collections import itertools import os import queue import threading import time import traceback import types import warnings # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError from .connection import wait # # Constants representing the state of a pool # INIT = "INIT" RUN = "RUN" CLOSE = "CLOSE" TERMINATE = "TERMINATE" # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): if (maxtasks is not None) and not (isinstance(maxtasks, int) and maxtasks >= 1): raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception and func is not _helper_reraises_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 util.debug('worker exiting after %d tasks' % completed) def _helper_reraises_exception(ex): 'Pickle-able helper function for use by _guarded_task_generation.' raise ex # # Class representing a process pool # class _PoolCache(dict): """ Class that implements a cache for the Pool class that will notify the pool management threads every time the cache is emptied. The notification is done by the use of a queue that is provided when instantiating the cache. """ def __init__(self, /, *args, notifier=None, **kwds): self.notifier = notifier super().__init__(*args, **kwds) def __delitem__(self, item): super().__delitem__(item) # Notify that the cache is empty. This is important because the # pool keeps maintaining workers until the cache gets drained. This # eliminates a race condition in which a task is finished after the # the pool's _handle_workers method has enter another iteration of the # loop. In this situation, the only event that can wake up the pool # is the cache to be emptied (no more tasks available). if not self: self.notifier.put(None) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' _wrap_exception = True @staticmethod def Process(ctx, *args, **kwds): return ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): # Attributes initialized early to make sure that they exist in # __del__() if __init__() raises an exception self._pool = [] self._state = INIT self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.SimpleQueue() # The _change_notifier queue exist to wake up self._handle_workers() # when the cache (self._cache) is empty or when there is a change in # the _state variable of the thread that runs _handle_workers. 
self._change_notifier = self._ctx.SimpleQueue() self._cache = _PoolCache(notifier=self._change_notifier) self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.process_cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if maxtasksperchild is not None: if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: raise ValueError("maxtasksperchild must be a positive int or None") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes try: self._repopulate_pool() except Exception: for p in self._pool: if p.exitcode is None: p.terminate() for p in self._pool: p.join() raise sentinels = self._get_sentinels() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self._cache, self._taskqueue, self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception, sentinels, self._change_notifier) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._change_notifier, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) self._state = RUN # Copy globals as function locals to make sure that they are available # during Python shutdown when the Pool is destroyed. def __del__(self, _warn=warnings.warn, RUN=RUN): if self._state == RUN: _warn(f"unclosed running multiprocessing pool {self!r}", ResourceWarning, source=self) if getattr(self, '_change_notifier', None) is not None: self._change_notifier.put(None) def __repr__(self): cls = self.__class__ return (f'<{cls.__module__}.{cls.__qualname__} ' f'state={self._state} ' f'pool_size={len(self._pool)}>') def _get_sentinels(self): task_queue_sentinels = [self._outqueue._reader] self_notifier_sentinels = [self._change_notifier._reader] return [*task_queue_sentinels, *self_notifier_sentinels] @staticmethod def _get_worker_sentinels(workers): return [worker.sentinel for worker in workers if hasattr(worker, "sentinel")] @staticmethod def _join_exited_workers(pool): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. 
""" cleaned = False for i in reversed(range(len(pool))): worker = pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del pool[i] return cleaned def _repopulate_pool(self): return self._repopulate_pool_static(self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) @staticmethod def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(processes - len(pool)): w = Process(ctx, target=worker, args=(inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() pool.append(w) util.debug('added worker') @staticmethod def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Clean up any exited workers and start replacements for them. """ if Pool._join_exited_workers(pool): Pool._repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _check_running(self): if self._state != RUN: raise ValueError("Pool not running") def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. Pool must be running. ''' return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def _guarded_task_generation(self, result_job, func, iterable): '''Provides a generator of tasks for imap and imap_unordered with appropriate handling for iterables which throw exceptions during iteration.''' try: i = -1 for i, x in enumerate(iterable): yield (result_job, i, func, (x,), {}) except Exception as e: yield (result_job, i+1, _helper_reraises_exception, (e,), {}) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. 
''' self._check_running() if chunksize == 1: result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0:n}".format( chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. ''' self._check_running() if chunksize == 1: result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0!r}".format(chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' self._check_running() result = ApplyResult(self, callback, error_callback) self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' self._check_running() if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapper, task_batches), None ) ) return result @staticmethod def _wait_for_updates(sentinels, change_notifier, timeout=None): wait(sentinels, timeout=timeout) while not change_notifier.empty(): change_notifier.get() @classmethod def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception, sentinels, change_notifier): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
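        # The loop below exits once the state leaves RUN: close() sets CLOSE,
        # in which case the handler keeps running until the cache drains,
        # while terminate() (via _terminate_pool) sets TERMINATE and stops it
        # immediately.  The trailing taskqueue.put(None) is the shutdown
        # sentinel consumed by _handle_tasks.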
while thread._state == RUN or (cache and thread._state != TERMINATE): cls._maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] cls._wait_for_updates(current_sentinels, change_notifier) # send sentinel to stop workers taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None try: # iterating taskseq cannot fail for task in taskseq: if thread._state != RUN: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, idx = task[:2] try: cache[job]._set(idx, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') idx = task[1] if task else -1 set_length(idx + 1) continue break finally: task = taskseq = job = None else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state != RUN: assert thread._state == TERMINATE, "Thread not in TERMINATE" util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
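            # (The two possible sentinels are the None put on outqueue by
            # _handle_tasks once the task queue is exhausted and the None put
            # by _terminate_pool during finalization.)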
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE self._change_notifier.put(None) def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') if self._state == RUN: raise ValueError("Pool is still running") elif self._state not in (CLOSE, TERMINATE): raise ValueError("In unknown state") self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') # Notify that the worker_handler state has been changed so the # _handle_workers loop can be unblocked (and exited) in order to # send the finalization sentinel all the workers. worker_handler._state = TERMINATE change_notifier.put(None) task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) if (not result_handler.is_alive()) and (len(cache) != 0): raise AssertionError( "Cannot have cache with result_handler not alive") result_handler._state = TERMINATE change_notifier.put(None) outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
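        # The hasattr() checks below matter because ThreadPool reuses this
        # finalizer: its workers come from .dummy and are plain threads, which
        # cannot be forcibly terminated.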
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): self._check_running() return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, pool, callback, error_callback): self._pool = pool self._event = threading.Event() self._job = next(job_counter) self._cache = pool._cache self._callback = callback self._error_callback = error_callback self._cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): if not self.ready(): raise ValueError("{0!r} not ready".format(self)) return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] self._pool = None __class_getitem__ = classmethod(types.GenericAlias) AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, pool, chunksize, length, callback, error_callback): ApplyResult.__init__(self, pool, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del self._cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): self._number_left -= 1 success, result = success_result if success and self._success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() self._pool = None else: if not success and self._success: # only store first exception self._success = False self._value = result if self._number_left == 0: # only consider the result ready once all jobs are done if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() self._pool = None # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, pool): self._pool = pool self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = pool._cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} self._cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise 
StopIteration from None raise TimeoutError from None success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] self._pool = None def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] self._pool = None # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] self._pool = None # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(ctx, *args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.SimpleQueue() self._outqueue = queue.SimpleQueue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _get_sentinels(self): return [self._change_notifier._reader] @staticmethod def _get_worker_sentinels(workers): return [] @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # drain inqueue, and put sentinels at its head to make workers finish try: while True: inqueue.get(block=False) except queue.Empty: pass for i in range(size): inqueue.put(None) def _wait_for_updates(self, sentinels, change_notifier, timeout): time.sleep(timeout) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/popen_fork.py000066400000000000000000000045061455552142400255270ustar00rootroot00000000000000import os import signal from . import util __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' def __init__(self, process_obj): util._flush_std_streams() self.returncode = None self.finalizer = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except OSError: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: self.returncode = os.waitstatus_to_exitcode(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from multiprocess.connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def _send_signal(self, sig): if self.returncode is None: try: os.kill(self.pid, sig) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def terminate(self): self._send_signal(signal.SIGTERM) def kill(self): self._send_signal(signal.SIGKILL) def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) os.close(parent_w) code = process_obj._bootstrap(parent_sentinel=child_r) finally: os._exit(code) else: os.close(child_w) os.close(child_r) self.finalizer = util.Finalize(self, util.close_fds, (parent_r, parent_w,)) self.sentinel = parent_r def close(self): if self.finalizer is not None: self.finalizer() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/popen_forkserver.py000066400000000000000000000042631455552142400267560ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen if not reduction.HAVE_SEND_HANDLE: raise ImportError('No support for sending fds between processes') from . import forkserver from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) # Keep a duplicate of the data pipe's write end as a sentinel of the # parent process used by the child process. _parent_w = os.dup(w) self.finalizer = util.Finalize(self, util.close_fds, (_parent_w, self.sentinel)) with open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_signed(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from multiprocess.connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_signed(self.sentinel) except (OSError, EOFError): # This should not happen usually, but perhaps the forkserver # process itself got killed self.returncode = 255 return self.returncode uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/popen_spawn_posix.py000066400000000000000000000037551455552142400271450ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): from . 
import resource_tracker tracker_fd = resource_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds) self.sentinel = parent_r with open(parent_w, 'wb', closefd=False) as f: f.write(fp.getbuffer()) finally: fds_to_close = [] for fd in (parent_r, parent_w): if fd is not None: fds_to_close.append(fd) self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) for fd in (child_r, child_w): if fd is not None: os.close(fd) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/popen_spawn_win32.py000066400000000000000000000106431455552142400267370ustar00rootroot00000000000000import os import msvcrt import signal import sys import _winapi from .context import reduction, get_spawning_popen, set_spawning_popen from . import spawn from . import util __all__ = ['Popen'] # # # # Exit code used by Popen.terminate() TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") def _path_eq(p1, p2): return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) WINENV = not _path_eq(sys.executable, sys._base_executable) def _close_handles(*handles): for handle in handles: _winapi.CloseHandle(handle) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' def __init__(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be duplicated by the child process # -- see spawn_main() in spawn.py. # # bpo-33929: Previously, the read end of pipe was "stolen" by the child # process, but it leaked a handle if the child process had been # terminated before it could steal the handle from the parent process. rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) python_exe = spawn.get_executable() # bpo-35797: When running in a venv, we bypass the redirect # executor and launch our base Python. 
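        # (WINENV is true when sys.executable is a venv redirector; the child
        # is then launched with the base interpreter and, presumably so that
        # it can still associate itself with the venv, __PYVENV_LAUNCHER__ is
        # set to the original executable.)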
if WINENV and _path_eq(python_exe, sys.executable): cmd[0] = python_exe = sys._base_executable env = os.environ.copy() env["__PYVENV_LAUNCHER__"] = sys.executable else: env = None cmd = ' '.join('"%s"' % x for x in cmd) with open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = _winapi.CreateProcess( python_exe, cmd, None, None, False, 0, env, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) self.finalizer = util.Finalize(self, _close_handles, (self.sentinel, int(rhandle))) # send information to child set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: set_spawning_popen(None) def duplicate_for_child(self, handle): assert self is get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is not None: return self.returncode if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is not None: return try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except PermissionError: # ERROR_ACCESS_DENIED (winerror 5) is received when the # process already died. code = _winapi.GetExitCodeProcess(int(self._handle)) if code == _winapi.STILL_ACTIVE: raise # gh-113009: Don't set self.returncode. Even if GetExitCodeProcess() # returns an exit code different than STILL_ACTIVE, the process can # still be running. Only set self.returncode once WaitForSingleObject() # returns WAIT_OBJECT_0 in wait(). kill = terminate def close(self): self.finalizer() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/process.py000066400000000000000000000275451455552142400250530ustar00rootroot00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
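#
# A minimal usage sketch (illustrative only -- user code normally imports
# `Process` from the top-level `multiprocess` package rather than from this
# module):
#
#     from multiprocess import Process
#
#     def greet(name):
#         print('hello', name)
#
#     if __name__ == '__main__':
#         p = Process(target=greet, args=('world',))
#         p.start()
#         p.join()
#         print(p.exitcode)   # 0 on a clean exit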
# __all__ = ['BaseProcess', 'current_process', 'active_children', 'parent_process'] # # Imports # import os import sys import signal import itertools import threading from _weakrefset import WeakSet # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_children) def parent_process(): ''' Return process object representing the parent process ''' return _parent_process # # # def _cleanup(): # check for processes which have finished for p in list(_children): if (child_popen := p._popen) and child_popen.poll() is not None: _children.discard(p) # # The `Process` class # class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analogous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count,) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._parent_name = _current_process.name self._popen = None self._closed = False self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) if daemon is not None: self.daemon = daemon _dangling.add(self) def _check_closed(self): if self._closed: raise ValueError("process object is closed") def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' self._check_closed() assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._check_closed() self._popen.terminate() def kill(self): ''' Terminate process; sends SIGKILL signal or uses TerminateProcess() ''' self._check_closed() self._popen.kill() def join(self, timeout=None): ''' Wait until child process terminates ''' self._check_closed() assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) def is_alive(self): ''' Return whether process is alive ''' self._check_closed() if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _children.discard(self) return False def close(self): ''' Close the Process object. This method releases resources held by the Process object. 
It is an error to call this method if the child process is still running. ''' if self._popen is not None: if self._popen.poll() is None: raise ValueError("Cannot close a process while it is still running. " "You should first call join() or terminate().") self._popen.close() self._popen = None del self._sentinel _children.discard(self) self._closed = True @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, str), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' self._check_closed() if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' self._check_closed() if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None def __repr__(self): exitcode = None if self is _current_process: status = 'started' elif self._closed: status = 'closed' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: exitcode = self._popen.poll() if exitcode is not None: status = 'stopped' else: status = 'started' info = [type(self).__name__, 'name=%r' % self._name] if self._popen is not None: info.append('pid=%s' % self._popen.pid) info.append('parent=%s' % self._parent_pid) info.append(status) if exitcode is not None: exitcode = _exitcode_to_name.get(exitcode, exitcode) info.append('exitcode=%s' % exitcode) if self.daemon: info.append('daemon') return '<%s>' % ' '.join(info) ## def _bootstrap(self, parent_sentinel=None): from . 
import util, context global _current_process, _parent_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() util._close_stdin() old_process = _current_process _current_process = self _parent_process = _ParentProcess( self._parent_name, self._parent_pid, parent_sentinel) if threading._HAVE_THREAD_NATIVE_ID: threading.main_thread()._set_native_id() try: self._after_fork() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as e: if e.code is None: exitcode = 0 elif isinstance(e.code, int): exitcode = e.code else: sys.stderr.write(str(e.code) + '\n') exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: threading._shutdown() util.info('process exiting with exitcode %d' % exitcode) util._flush_std_streams() return exitcode @staticmethod def _after_fork(): from . import util util._finalizer_registry.clear() util._run_after_forkers() # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the parent process # class _ParentProcess(BaseProcess): def __init__(self, name, pid, sentinel): self._identity = () self._name = name self._pid = pid self._parent_pid = None self._popen = None self._closed = False self._sentinel = sentinel self._config = {} def is_alive(self): from multiprocess.connection import wait return not wait([self._sentinel], timeout=0) @property def ident(self): return self._pid def join(self, timeout=None): ''' Wait until parent process terminates ''' from multiprocess.connection import wait wait([self._sentinel], timeout=timeout) pid = ident # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._closed = False self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} # Note that some versions of FreeBSD only allow named # semaphores to have names of up to 14 characters. Therefore # we choose a short prefix. # # On MacOSX in a sandbox it may be necessary to use a # different prefix -- see #19478. # # Everything in self._config will be inherited by descendant # processes. def close(self): pass _parent_process = None _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in list(signal.__dict__.items()): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = f'-{name}' del name, signum # For debug and leak testing _dangling = WeakSet() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/queues.py000066400000000000000000000310111455552142400246630ustar00rootroot00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
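#
# A minimal usage sketch (illustrative only -- queues are normally created
# through the top-level `multiprocess.Queue()` factory, which supplies the
# context for you):
#
#     from multiprocess import Process, Queue
#
#     def produce(q):
#         q.put('payload')
#
#     if __name__ == '__main__':
#         q = Queue()
#         p = Process(target=produce, args=(q,))
#         p.start()
#         print(q.get())   # blocks until the child has put an item
#         p.join()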
# __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import types import weakref import errno from queue import Empty, Full try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import connection from . import context _ForkingPickler = context.reduction.ForkingPickler from .util import debug, info, Finalize, register_after_fork, is_exiting # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0, *, ctx): if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._reset() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._reset() def _after_fork(self): debug('Queue._after_fork()') self._reset(after_fork=True) def _reset(self, after_fork=False): if after_fork: self._notempty._at_fork_reinit() else: self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = getattr(time,'monotonic',time.time)() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - getattr(time,'monotonic',time.time)() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed, "Queue {0!r} not closed".format(self) if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except 
AttributeError: pass def _terminate_broken(self): # Close a Queue on error. # gh-94777: Prevent queue writing to a pipe which is no longer read. self._reader.close() # gh-107219: Close the connection writer which can unblock # Queue._feed() if it was stuck in send_bytes(). if sys.platform == 'win32': self._writer.close() self.close() self.join_thread() def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._reader.close, self._writer.close, self._ignore_epipe, self._on_queue_feeder_error, self._sem), name='QueueFeederThread', daemon=True, ) try: debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') except: # gh-109047: During Python finalization, creating a thread # can fail with RuntimeError. self._thread = None raise if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, reader_close, writer_close, ignore_epipe, onerror, queue_sem): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') reader_close() writer_close() return # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as e: if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: # Since the object has not been sent in the queue, we need # to decrease the size of the queue. The error acts as # if the object had been silently removed from the queue # and this step is necessary to have a properly working # queue. queue_sem.release() onerror(e, obj) @staticmethod def _on_queue_feeder_error(e, obj): """ Private API hook called when feeding data in the background thread raises an exception. For overriding by concurrent.futures. 
""" import traceback traceback.print_exc() __class_getitem__ = classmethod(types.GenericAlias) _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. # class JoinableQueue(Queue): def __init__(self, maxsize=0, *, ctx): Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty, self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self, *, ctx): self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() def close(self): self._reader.close() self._writer.close() def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._poll = self._reader.poll def get(self): with self._rlock: res = self._reader.recv_bytes() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def put(self, obj): # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(obj) else: with self._wlock: self._writer.send_bytes(obj) __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/reduction.py000066400000000000000000000226451455552142400253650ustar00rootroot00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from abc import ABCMeta import copyreg import functools import io import os try: import dill as pickle except ImportError: import pickle import socket import sys from . 
import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocess.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None, *args, **kwds): buf = io.BytesIO() cls(buf, protocol, *args, **kwds).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None, *args, **kwds): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol, *args, **kwds).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False, *, source_process=None): '''Duplicate a handle. (target_process is a handle not a pid!)''' current_process = _winapi.GetCurrentProcess() if source_process is None: source_process = current_process if target_process is None: target_process = current_process return _winapi.DuplicateHandle( source_process, handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
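            # (DUPLICATE_CLOSE_SOURCE below removes the handle from the owning
            # process as part of the transfer, which is why detach() may only
            # be called once.)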
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) if len(a) % 256 != msg[0]: raise AssertionError( "Len is {0:n} but msg[0] is {1!r}".format( len(a), msg[0])) return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) class AbstractReducer(metaclass=ABCMeta): '''Abstract base class for use in implementing a Reduction class suitable for use in replacing the standard reduction mechanism used in multiprocess.''' ForkingPickler = ForkingPickler register = register dump = dump send_handle = send_handle recv_handle = recv_handle if sys.platform == 'win32': steal_handle = steal_handle duplicate = duplicate DupHandle = DupHandle else: sendfds = sendfds recvfds = recvfds DupFd = DupFd _reduce_method = _reduce_method _reduce_method_descriptor = _reduce_method_descriptor _rebuild_partial = _rebuild_partial _reduce_socket = _reduce_socket _rebuild_socket = _rebuild_socket def __init__(self, *args): register(type(_C().f), _reduce_method) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) register(functools.partial, _reduce_partial) register(socket.socket, _reduce_socket) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/resource_sharer.py000066400000000000000000000120311455552142400265500ustar00rootroot00000000000000# # We use a background thread for sharing fds on Unix, and for sharing sockets on # Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. # import os import signal import socket import sys import threading from . import process from .context import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. 
This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resources using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() self._lock._at_fork_reinit() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None, "Already have Listener" util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey, backlog=128) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/resource_tracker.py000066400000000000000000000242721455552142400267310ustar00rootroot00000000000000############################################################################### # Server process to keep track of unlinked resources (like shared memory # segments, semaphores etc.) and clean them. # # On Unix we run a server process which keeps track of unlinked # resources. The server ignores SIGINT and SIGTERM and reads from a # pipe. 
Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining resource names. # # This is important because there may be system limits for such resources: for # instance, the system only supports a limited number of named semaphores, and # shared-memory segments live in the RAM. If a python process leaks such a # resource, this resource will not be removed till the next reboot. Without # this resource tracker process, "killall python" would probably leave unlinked # resources. import os import signal import sys import threading import warnings from . import spawn from . import util __all__ = ['ensure_running', 'register', 'unregister'] _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) _CLEANUP_FUNCS = { 'noop': lambda: None, } if os.name == 'posix': try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import _posixshmem # Use sem_unlink() to clean up named semaphores. # # sem_unlink() may be missing if the Python build process detected the # absence of POSIX named semaphores. In that case, no named semaphores were # ever opened, so no cleanup would be necessary. if hasattr(_multiprocessing, 'sem_unlink'): _CLEANUP_FUNCS.update({ 'semaphore': _multiprocessing.sem_unlink, }) _CLEANUP_FUNCS.update({ 'shared_memory': _posixshmem.shm_unlink, }) class ReentrantCallError(RuntimeError): pass class ResourceTracker(object): def __init__(self): self._lock = threading.RLock() self._fd = None self._pid = None def _reentrant_call_error(self): # gh-109629: this happens if an explicit call to the ResourceTracker # gets interrupted by a garbage collection, invoking a finalizer (*) # that itself calls back into ResourceTracker. # (*) for example the SemLock finalizer raise ReentrantCallError( "Reentrant call into the multiprocess resource tracker") def _stop(self): with self._lock: # This should not happen (_stop() isn't called by a finalizer) # but we check for it anyway. if self._lock._recursion_count() > 1: return self._reentrant_call_error() if self._fd is None: # not running return # closing the "alive" file descriptor stops main() os.close(self._fd) self._fd = None os.waitpid(self._pid, 0) self._pid = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that resource tracker process is running. This can be run from any process. Usually a child process will use the resource created by its parent.''' with self._lock: if self._lock._recursion_count() > 1: # The code below is certainly not reentrant-safe, so bail out return self._reentrant_call_error() if self._fd is not None: # resource tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) # Clean-up to avoid dangling processes. try: # _pid can be None if this process is a child from another # python process, which has started the resource_tracker. if self._pid is not None: os.waitpid(self._pid, 0) except ChildProcessError: # The resource_tracker has already been terminated. pass self._fd = None self._pid = None warnings.warn('resource_tracker: process died unexpectedly, ' 'relaunching. 
Some resources might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from multiprocess.resource_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] # bpo-33613: Register a signal mask that will block the signals. # This signal mask will be inherited by the child that is going # to be spawned and will protect the child from a race condition # that can make the child die before it registers signal handlers # for SIGINT and SIGTERM. The mask is unregistered after spawning # the child. try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = util.spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r) def _check_alive(self): '''Check that the pipe has not been closed by sending a probe.''' try: # We cannot use send here as it calls ensure_running, creating # a cycle. os.write(self._fd, b'PROBE:0:noop\n') except OSError: return False else: return True def register(self, name, rtype): '''Register name of resource with resource tracker.''' self._send('REGISTER', name, rtype) def unregister(self, name, rtype): '''Unregister name of resource with resource tracker.''' self._send('UNREGISTER', name, rtype) def _send(self, cmd, name, rtype): try: self.ensure_running() except ReentrantCallError: # The code below might or might not work, depending on whether # the resource tracker was already running and still alive. # Better warn the user. # (XXX is warnings.warn itself reentrant-safe? :-) warnings.warn( f"ResourceTracker called reentrantly for resource cleanup, " f"which is unsupported. 
" f"The {rtype} object {name!r} might leak.") msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') if len(msg) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('msg too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( nbytes, len(msg)) _resource_tracker = ResourceTracker() ensure_running = _resource_tracker.ensure_running register = _resource_tracker.register unregister = _resource_tracker.unregister getfd = _resource_tracker.getfd def main(fd): '''Run resource tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} try: # keep track of registered/unregistered resources with open(fd, 'rb') as f: for line in f: try: cmd, name, rtype = line.strip().decode('ascii').split(':') cleanup_func = _CLEANUP_FUNCS.get(rtype, None) if cleanup_func is None: raise ValueError( f'Cannot register {name} for automatic cleanup: ' f'unknown resource type {rtype}') if cmd == 'REGISTER': cache[rtype].add(name) elif cmd == 'UNREGISTER': cache[rtype].remove(name) elif cmd == 'PROBE': pass else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining resources for rtype, rtype_cache in cache.items(): if rtype_cache: try: warnings.warn( f'resource_tracker: There appear to be {len(rtype_cache)} ' f'leaked {rtype} objects to clean up at shutdown: {rtype_cache}' ) except Exception: pass for name in rtype_cache: # For some reason the process which created and registered this # resource has failed to unregister it. Presumably it has # died. We therefore unlink it. try: try: _CLEANUP_FUNCS[rtype](name) except Exception as e: warnings.warn('resource_tracker: %r: %s' % (name, e)) finally: pass uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/shared_memory.py000066400000000000000000000447461455552142400262350ustar00rootroot00000000000000"""Provides shared memory for direct access across processes. The API of this package is currently provisional. Refer to the documentation for details. """ __all__ = [ 'SharedMemory', 'ShareableList' ] from functools import partial import mmap import os import errno import struct import secrets import types if os.name == "nt": import _winapi _USE_POSIX = False else: import _posixshmem _USE_POSIX = True from . import resource_tracker _O_CREX = os.O_CREAT | os.O_EXCL # FreeBSD (and perhaps other BSDs) limit names to 14 characters. _SHM_SAFE_NAME_LENGTH = 14 # Shared memory block name prefix if _USE_POSIX: _SHM_NAME_PREFIX = '/psm_' else: _SHM_NAME_PREFIX = 'wnsm_' def _make_filename(): "Create a random filename for the shared memory object." # number of random bytes to use for name nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 assert nbytes >= 2, '_SHM_NAME_PREFIX too long' name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) assert len(name) <= _SHM_SAFE_NAME_LENGTH return name class SharedMemory: """Creates a new shared memory block or attaches to an existing shared memory block. Every shared memory block is assigned a unique name. 
This enables one process to create a shared memory block with a particular name so that a different process can attach to that same shared memory block using that same name. As a resource for sharing data across processes, shared memory blocks may outlive the original process that created them. When one process no longer needs access to a shared memory block that might still be needed by other processes, the close() method should be called. When a shared memory block is no longer needed by any process, the unlink() method should be called to ensure proper cleanup.""" # Defaults; enables close() and unlink() to run without errors. _name = None _fd = -1 _mmap = None _buf = None _flags = os.O_RDWR _mode = 0o600 _prepend_leading_slash = True if _USE_POSIX else False _track = True def __init__(self, name=None, create=False, size=0, *, track=True): if not size >= 0: raise ValueError("'size' must be a positive integer") if create: self._flags = _O_CREX | os.O_RDWR if size == 0: raise ValueError("'size' must be a positive number different from zero") if name is None and not self._flags & os.O_EXCL: raise ValueError("'name' can only be None if create=True") self._track = track if _USE_POSIX: # POSIX Shared Memory if name is None: while True: name = _make_filename() try: self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) except FileExistsError: continue self._name = name break else: name = "/" + name if self._prepend_leading_slash else name self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) self._name = name try: if create and size: os.ftruncate(self._fd, size) stats = os.fstat(self._fd) size = stats.st_size self._mmap = mmap.mmap(self._fd, size) except OSError: self.unlink() raise if self._track: resource_tracker.register(self._name, "shared_memory") else: # Windows Named Shared Memory if create: while True: temp_name = _make_filename() if name is None else name # Create and reserve shared memory block with this name # until it can be attached to by mmap. h_map = _winapi.CreateFileMapping( _winapi.INVALID_HANDLE_VALUE, _winapi.NULL, _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF, size & 0xFFFFFFFF, temp_name ) try: last_error_code = _winapi.GetLastError() if last_error_code == _winapi.ERROR_ALREADY_EXISTS: if name is not None: raise FileExistsError( errno.EEXIST, os.strerror(errno.EEXIST), name, _winapi.ERROR_ALREADY_EXISTS ) else: continue self._mmap = mmap.mmap(-1, size, tagname=temp_name) finally: _winapi.CloseHandle(h_map) self._name = temp_name break else: self._name = name # Dynamically determine the existing named shared memory # block's size which is likely a multiple of mmap.PAGESIZE. h_map = _winapi.OpenFileMapping( _winapi.FILE_MAP_READ, False, name ) try: p_buf = _winapi.MapViewOfFile( h_map, _winapi.FILE_MAP_READ, 0, 0, 0 ) finally: _winapi.CloseHandle(h_map) try: size = _winapi.VirtualQuerySize(p_buf) finally: _winapi.UnmapViewOfFile(p_buf) self._mmap = mmap.mmap(-1, size, tagname=name) self._size = size self._buf = memoryview(self._mmap) def __del__(self): try: self.close() except OSError: pass def __reduce__(self): return ( self.__class__, ( self.name, False, self.size, ), ) def __repr__(self): return f'{self.__class__.__name__}({self.name!r}, size={self.size})' @property def buf(self): "A memoryview of contents of the shared memory block." return self._buf @property def name(self): "Unique name that identifies the shared memory block." 
reported_name = self._name if _USE_POSIX and self._prepend_leading_slash: if self._name.startswith("/"): reported_name = self._name[1:] return reported_name @property def size(self): "Size in bytes." return self._size def close(self): """Closes access to the shared memory from this instance but does not destroy the shared memory block.""" if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() self._mmap = None if _USE_POSIX and self._fd >= 0: os.close(self._fd) self._fd = -1 def unlink(self): """Requests that the underlying shared memory block be destroyed. Unlink should be called once (and only once) across all handles which have access to the shared memory block, even if these handles belong to different processes. Closing and unlinking may happen in any order, but trying to access data inside a shared memory block after unlinking may result in memory errors, depending on platform. This method has no effect on Windows, where the only way to delete a shared memory block is to close all handles.""" if _USE_POSIX and self._name: _posixshmem.shm_unlink(self._name) if self._track: resource_tracker.unregister(self._name, "shared_memory") _encoding = "utf8" class ShareableList: """Pattern for a mutable list-like object shareable via a shared memory block. It differs from the built-in list type in that these lists can not change their overall length (i.e. no append, insert, etc.) Because values are packed into a memoryview as bytes, the struct packing format for any storable value must require no more than 8 characters to describe its format.""" # The shared memory area is organized as follows: # - 8 bytes: number of items (N) as a 64-bit integer # - (N + 1) * 8 bytes: offsets of each element from the start of the # data area # - K bytes: the data area storing item values (with encoding and size # depending on their respective types) # - N * 8 bytes: `struct` format string for each element # - N bytes: index into _back_transforms_mapping for each element # (for reconstructing the corresponding Python value) _types_mapping = { int: "q", float: "d", bool: "xxxxxxx?", str: "%ds", bytes: "%ds", None.__class__: "xxxxxx?x", } _alignment = 8 _back_transforms_mapping = { 0: lambda value: value, # int, float, bool 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str 2: lambda value: value.rstrip(b'\x00'), # bytes 3: lambda _value: None, # None } @staticmethod def _extract_recreation_code(value): """Used in concert with _back_transforms_mapping to convert values into the appropriate Python objects when retrieving them from the list as well as when storing them.""" if not isinstance(value, (str, bytes, None.__class__)): return 0 elif isinstance(value, str): return 1 elif isinstance(value, bytes): return 2 else: return 3 # NoneType def __init__(self, sequence=None, *, name=None): if name is None or sequence is not None: sequence = sequence or () _formats = [ self._types_mapping[type(item)] if not isinstance(item, (str, bytes)) else self._types_mapping[type(item)] % ( self._alignment * (len(item) // self._alignment + 1), ) for item in sequence ] self._list_len = len(_formats) assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len offset = 0 # The offsets of each list element into the shared memory's # data area (0 meaning the start of the data area, not the start # of the shared memory area). 
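# Illustrative worked example (added note, not in the original source):
# for ShareableList([3, 'hi']) the element formats are ['q', '8s'] -- the
# two-character string is padded up to the 8-byte alignment -- so the
# shared memory block is laid out as:
#   8 bytes  -> list length (2)
#   24 bytes -> element offsets [0, 8, 16]   ((N + 1) == 3 entries)
#   16 bytes -> packed data for 'q' + '8s'
#   16 bytes -> the packing formats, stored as two 8-byte strings
#   2 bytes  -> back-transform codes (0 for int, 1 for str)
# and _offset_data_start below works out to (2 + 2) * 8 == 32.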
self._allocated_offsets = [0] for fmt in _formats: offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) self._allocated_offsets.append(offset) _recreation_codes = [ self._extract_recreation_code(item) for item in sequence ] requested_size = struct.calcsize( "q" + self._format_size_metainfo + "".join(_formats) + self._format_packing_metainfo + self._format_back_transform_codes ) self.shm = SharedMemory(name, create=True, size=requested_size) else: self.shm = SharedMemory(name) if sequence is not None: _enc = _encoding struct.pack_into( "q" + self._format_size_metainfo, self.shm.buf, 0, self._list_len, *(self._allocated_offsets) ) struct.pack_into( "".join(_formats), self.shm.buf, self._offset_data_start, *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) ) struct.pack_into( self._format_packing_metainfo, self.shm.buf, self._offset_packing_formats, *(v.encode(_enc) for v in _formats) ) struct.pack_into( self._format_back_transform_codes, self.shm.buf, self._offset_back_transform_codes, *(_recreation_codes) ) else: self._list_len = len(self) # Obtains size from offset 0 in buffer. self._allocated_offsets = list( struct.unpack_from( self._format_size_metainfo, self.shm.buf, 1 * 8 ) ) def _get_packing_format(self, position): "Gets the packing format for a single value stored in the list." position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") v = struct.unpack_from( "8s", self.shm.buf, self._offset_packing_formats + position * 8 )[0] fmt = v.rstrip(b'\x00') fmt_as_str = fmt.decode(_encoding) return fmt_as_str def _get_back_transform(self, position): "Gets the back transformation function for a single value." if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") transform_code = struct.unpack_from( "b", self.shm.buf, self._offset_back_transform_codes + position )[0] transform_function = self._back_transforms_mapping[transform_code] return transform_function def _set_packing_format_and_transform(self, position, fmt_as_str, value): """Sets the packing format and back transformation code for a single value in the list at the specified position.""" if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") struct.pack_into( "8s", self.shm.buf, self._offset_packing_formats + position * 8, fmt_as_str.encode(_encoding) ) transform_code = self._extract_recreation_code(value) struct.pack_into( "b", self.shm.buf, self._offset_back_transform_codes + position, transform_code ) def __getitem__(self, position): position = position if position >= 0 else position + self._list_len try: offset = self._offset_data_start + self._allocated_offsets[position] (v,) = struct.unpack_from( self._get_packing_format(position), self.shm.buf, offset ) except IndexError: raise IndexError("index out of range") back_transform = self._get_back_transform(position) v = back_transform(v) return v def __setitem__(self, position, value): position = position if position >= 0 else position + self._list_len try: item_offset = self._allocated_offsets[position] offset = self._offset_data_start + item_offset current_format = self._get_packing_format(position) except IndexError: raise IndexError("assignment index out of range") if not isinstance(value, (str, bytes)): new_format = self._types_mapping[type(value)] encoded_value = value else: allocated_length = self._allocated_offsets[position + 
1] - item_offset encoded_value = (value.encode(_encoding) if isinstance(value, str) else value) if len(encoded_value) > allocated_length: raise ValueError("bytes/str item exceeds available storage") if current_format[-1] == "s": new_format = current_format else: new_format = self._types_mapping[str] % ( allocated_length, ) self._set_packing_format_and_transform( position, new_format, value ) struct.pack_into(new_format, self.shm.buf, offset, encoded_value) def __reduce__(self): return partial(self.__class__, name=self.shm.name), () def __len__(self): return struct.unpack_from("q", self.shm.buf, 0)[0] def __repr__(self): return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' @property def format(self): "The struct packing format used by all currently stored items." return "".join( self._get_packing_format(i) for i in range(self._list_len) ) @property def _format_size_metainfo(self): "The struct packing format used for the items' storage offsets." return "q" * (self._list_len + 1) @property def _format_packing_metainfo(self): "The struct packing format used for the items' packing formats." return "8s" * self._list_len @property def _format_back_transform_codes(self): "The struct packing format used for the items' back transforms." return "b" * self._list_len @property def _offset_data_start(self): # - 8 bytes for the list length # - (N + 1) * 8 bytes for the element offsets return (self._list_len + 2) * 8 @property def _offset_packing_formats(self): return self._offset_data_start + self._allocated_offsets[-1] @property def _offset_back_transform_codes(self): return self._offset_packing_formats + self._list_len * 8 def count(self, value): "L.count(value) -> integer -- return number of occurrences of value." return sum(value == entry for entry in self) def index(self, value): """L.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present.""" for position, entry in enumerate(self): if value == entry: return position else: raise ValueError(f"{value!r} not in this container") __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/sharedctypes.py000066400000000000000000000142421455552142400260610ustar00rootroot00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import ctypes import weakref from . import heap from . 
import get_context from .context import reduction, assert_spawning _ForkingPickler = reduction.ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=True, ctx=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): ''' Return a synchronization wrapper for a RawArray ''' obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = {name: make_property(name) for name in names} classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, ctx) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length _ForkingPickler.register(type_, reduce_ctype) buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} 
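# Illustrative expansion (added note, assuming name == 'value'; see the
# module-level `template` defined below): the exec call that follows
# builds, roughly,
#
#     def getvalue(self):
#         self.acquire()
#         try:
#             return self._obj.value
#         finally:
#             self.release()
#
#     def setvalue(self, value):
#         self.acquire()
#         try:
#             self._obj.value = value
#         finally:
#             self.release()
#
#     value = property(getvalue, setvalue)
#
# i.e. every read or write of the wrapped ctypes attribute happens while
# holding the synchronizing lock.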
exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/spawn.py000066400000000000000000000226511455552142400245160ustar00rootroot00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import sys import runpy import types from . import get_start_method, set_start_method from . import process from .context import reduction from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
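# Illustrative usage (added note, not in the original source; the path is
# a made-up example): an application embedding Python can point spawned
# children at a specific interpreter before starting any processes, e.g.
#
#     import multiprocess as mp
#     mp.set_executable('/opt/myapp/bin/python3')
#
# after which get_executable() returns the configured path when the
# child command line is assembled in get_command_line().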
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = getattr(sys, 'frozen', False) WINSERVICE = sys.executable and sys.executable.lower().endswith("pythonservice.exe") def set_executable(exe): global _python_exe if exe is None: _python_exe = exe elif sys.platform == 'win32': _python_exe = os.fsdecode(exe) else: _python_exe = os.fsencode(exe) def get_executable(): return _python_exe if WINSERVICE: set_executable(os.path.join(sys.exec_prefix, 'python.exe')) else: set_executable(sys.executable) # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--multiprocessing-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() exe = get_executable() return [exe] + opts + ['-c', prog, '--multiprocessing-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv), "Not forking" if sys.platform == 'win32': import msvcrt import _winapi if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid) else: source_process = None new_handle = reduction.duplicate(pipe_handle, source_process=source_process) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process else: from . import resource_tracker resource_tracker._resource_tracker._fd = tracker_fd fd = pipe_handle parent_sentinel = os.dup(pipe_handle) exitcode = _main(fd, parent_sentinel) sys.exit(exitcode) def _main(fd, parent_sentinel): with os.fdopen(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = reduction.pickle.load(from_parent) prepare(preparation_data) self = reduction.pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap(parent_sentinel) def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable. 
To fix this issue, refer to the "Safe importing of main module" section in https://docs.python.org/3/library/multiprocessing.html ''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path=sys.path.copy() try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] main_mod_name = getattr(main_module.__spec__, "name", None) if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. 
We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/synchronize.py000066400000000000000000000303321455552142400257340ustar00rootroot00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import sys import tempfile try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import time from . import context from . import process from . import util # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
# See issue 3770 try: from _multiprocess import SemLock, sem_unlink except ImportError: try: from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, *, ctx): if ctx is None: ctx = context._default_context.get_context() self._is_fork_ctx = ctx.get_start_method() == 'fork' unlink_now = sys.platform == 'win32' or self._is_fork_ctx for i in range(100): try: sl = self._semlock = _multiprocessing.SemLock( kind, value, maxvalue, self._make_name(), unlink_now) except FileExistsError: pass else: break else: raise FileExistsError('cannot find name for semaphore') util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if self._semlock.name is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .resource_tracker import register register(self._semlock.name, "semaphore") util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .resource_tracker import unregister sem_unlink(name) unregister(name, "semaphore") def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: if self._is_fork_ctx: raise RuntimeError('A SemLock created in a fork context is being ' 'shared with a process in a spawn context. This is ' 'not supported. Please use the same context to create ' 'multiprocess objects and Process.') h = sl.handle return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() # Ensure that deserialized SemLock can be serialized again (gh-108520). 
self._is_fork_ctx = False @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % \ (self.__class__.__name__, value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) # # Recursive lock # class RLock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None, *, ctx): self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def 
notify(self, n=1): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire( False), ('notify: Should not have been able to acquire ' + '_wait_semaphore') # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res, ('notify: Bug in sleeping_count.acquire' + '- res should not be False') sleepers = 0 while sleepers < n and self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def notify_all(self): self.notify(n=sys.maxsize) def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result # # Event # class Event(object): def __init__(self, *, ctx): self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False def __repr__(self) -> str: set_status = 'set' if self.is_set() else 'unset' return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>" # # Barrier # class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, *, ctx): import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): self._array[1] = value uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/000077500000000000000000000000001455552142400241505ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/__init__.py000066400000000000000000006417001455552142400262710ustar00rootroot00000000000000# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import textwrap import time import io import itertools import sys import os import gc import errno import functools import signal import array import socket import random import logging import subprocess import struct import operator import pathlib import pickle #XXX: use dill? 
import weakref import warnings import test.support import test.support.script_helper from test import support from test.support import hashlib_helper from test.support import import_helper from test.support import os_helper from test.support import script_helper from test.support import socket_helper from test.support import threading_helper from test.support import warnings_helper # Skip tests if _multiprocessing wasn't built. _multiprocessing = import_helper.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. import_helper.import_module('multiprocess.synchronize') import threading import multiprocess as multiprocessing import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess.connection import wait from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocess import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None if support.HAVE_ASAN_FORK_BUG: # gh-89363: Skip multiprocessing tests if Python is built with ASAN to # work around a libasan race condition: dead lock in pthread_create(). raise unittest.SkipTest("libasan has a pthread_create() dead lock related to thread+fork") # gh-110666: Tolerate a difference of 100 ms when comparing timings # (clock resolution) CLOCK_RES = 0.100 # Don't ignore user's installed packages ENV = dict(__cleanenv = False, __isolated = False) # Timeout to wait until a process completes #XXX: travis-ci TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused threading_helper.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocess import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 # BaseManager.shutdown_timeout SHUTDOWN_TIMEOUT = support.SHORT_TIMEOUT WAIT_ACTIVE_CHILDREN_TIMEOUT = 5.0 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough 
semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." % nsems_min) def only_run_in_spawn_testsuite(reason): """Returns a decorator: raises SkipTest when SM != spawn at test time. This can be useful to save overall Python test suite execution time. "spawn" is the universal mode available on all platforms so this limits the decorated test to only execute within test_multiprocessing_spawn. This would not be necessary if we refactored our test suite to split things into other test files when they are not start method specific to be rerun under all start methods. """ def decorator(test_item): @functools.wraps(test_item) def spawn_check_wrapper(*args, **kwargs): if (start_method := multiprocessing.get_start_method()) != "spawn": raise unittest.SkipTest(f"{start_method=}, not 'spawn'; {reason}") return test_item(*args, **kwargs) return spawn_check_wrapper return decorator class TestInternalDecorators(unittest.TestCase): """Logic within a test suite that could errantly skip tests? Test it!""" @unittest.skipIf(sys.platform == "win32", "test requires that fork exists.") def test_only_run_in_spawn_testsuite(self): if multiprocessing.get_start_method() != "spawn": raise unittest.SkipTest("only run in test_multiprocessing_spawn.") try: @only_run_in_spawn_testsuite("testing this decorator") def return_four_if_spawn(): return 4 except Exception as err: self.fail(f"expected decorated `def` not to raise; caught {err}") orig_start_method = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method("spawn", force=True) self.assertEqual(return_four_if_spawn(), 4) multiprocessing.set_start_method("fork", force=True) with self.assertRaises(unittest.SkipTest) as ctx: return_four_if_spawn() self.assertIn("testing this decorator", str(ctx.exception)) self.assertIn("start_method=", str(ctx.exception)) finally: multiprocessing.set_start_method(orig_start_method, force=True) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.monotonic() try: return self.func(*args, **kwds) finally: self.elapsed = time.monotonic() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. 
def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_set_executable(self): if self.TYPE == 'threads': self.skipTest(f'test not appropriate for {self.TYPE}') paths = [ sys.executable, # str sys.executable.encode(), # bytes pathlib.Path(sys.executable) # os.PathLike ] for path in paths: self.set_executable(path) p = self.Process() p.start() p.join() self.assertEqual(p.exitcode, 0) @support.requires_resource('cpu') def test_args_argument(self): # bpo-45735: Using list or tuple as *args* in constructor could # achieve the same effect. args_cases = (1, "str", [1], (1,)) args_types = (list, tuple) test_cases = itertools.product(args_cases, args_types) for args, args_type in test_cases: with self.subTest(args=args, args_type=args_type): q = self.Queue(1) # pass a tuple or list as args p = self.Process(target=self._test_args, args=args_type((q, args))) p.daemon = True p.start() child_args = q.get() self.assertEqual(child_args, args) p.join() close_queue(q) @classmethod def _test_args(cls, q, arg): q.put(arg) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocess.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def _test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocess.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=support.SHORT_TIMEOUT) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. 
# We use alarm() to interrupt it if it blocks for too long. def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) else: self.assertEqual(exitcode, -signal.SIGTERM) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) @support.requires_resource('walltime') def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() travis = os.environ.get('COVERAGE') #XXX: travis-ci N = (1 if travis else 5) if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: 
p.start() for p in procs: join_process(p) for p in procs: self.assertEqual(p.exitcode, 0) procs = [self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.001) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() gc.collect() # For PyPy or other GCs. self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(os_helper.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) 
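    # Illustrative sketch (not part of the original suite): the behaviour the
    # two stdio-flush tests above rely on can be reproduced standalone.
    # Hypothetical usage, assuming the `multiprocess` fork is installed:
    #
    #   import sys
    #   import multiprocess as mp
    #
    #   def worker():
    #       sys.stdout = None          # break stdout inside the child
    #
    #   if __name__ == '__main__':
    #       p = mp.Process(target=worker)
    #       p.start(); p.join()
    #       assert p.exitcode == 0     # child still shuts down cleanly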
@classmethod def _sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, encoding="utf-8") as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("__init__.py", err) #self.assertIn("1/0 # MARKER", err) #FIXME @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) 
self.assertEqual(p.exitcode, 1) with open(testfn, encoding="utf-8") as f: content = f.read() self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) cases = [ ((True,), 1), ((False,), 0), ((8,), 8), ((None,), 0), ((), 0), ] for args, expected in cases: with self.subTest(args=args): p = self.Process(target=sys.exit, args=args) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, expected) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) 
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with os_helper.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w', encoding="utf-8") as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with import_helper.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = time.monotonic() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = time.monotonic() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once # failed because the delta was only 135.8 ms. self.assertGreaterEqual(delta, 0.100) close_queue(q) def test_queue_feeder_donot_stop_onexc(self): # bpo-30414: verify feeder handles exceptions correctly if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): def __reduce__(self): raise AttributeError with test.support.captured_stderr(): q = self.Queue() q.put(NotSerializable()) q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) close_queue(q) with test.support.captured_stderr(): # bpo-33078: verify that the queue size is correctly handled # on errors. 
q = self.Queue(maxsize=1) q.put(NotSerializable()) q.put(True) try: self.assertEqual(q.qsize(), 1) except NotImplementedError: # qsize is not available on all platform as it # relies on sem_getvalue pass self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Check that the size of the queue is correct self.assertTrue(q.empty()) close_queue(q) def test_queue_feeder_on_queue_feeder_error(self): # bpo-30006: verify feeder handles exceptions using the # _on_queue_feeder_error hook. if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): """Mock unserializable object""" def __init__(self): self.reduce_was_called = False self.on_queue_feeder_error_was_called = False def __reduce__(self): self.reduce_was_called = True raise AttributeError class SafeQueue(multiprocessing.queues.Queue): """Queue with overloaded _on_queue_feeder_error hook""" @staticmethod def _on_queue_feeder_error(e, obj): if (isinstance(e, AttributeError) and isinstance(obj, NotSerializable)): obj.on_queue_feeder_error_was_called = True not_serializable_obj = NotSerializable() # The captured_stderr reduces the noise in the test report with test.support.captured_stderr(): q = SafeQueue(ctx=multiprocessing.get_context()) q.put(not_serializable_obj) # Verify that q is still functioning correctly q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Assert that the serialization and the hook have been called correctly self.assertTrue(not_serializable_obj.reduce_was_called) self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) def test_closed_queue_put_get_exceptions(self): for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): q.close() with self.assertRaisesRegex(ValueError, 'is closed'): q.put('foo') with self.assertRaisesRegex(ValueError, 'is closed'): q.get() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # 
self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def assertReachesEventually(self, func, value): for i in range(10): try: if func() == value: break except NotImplementedError: break time.sleep(DELTA) time.sleep(DELTA) self.assertReturnsIfImplemented(value, func) def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() 
cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.100 dt = time.monotonic() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = time.monotonic() - dt if not result and (expected - CLOCK_RES) <= dt: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
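        # Illustrative sketch (not part of the original test): Condition.wait_for
        # with a timeout returns the last value of the predicate, i.e. False if
        # it timed out. Hypothetical standalone usage, assuming `multiprocess`:
        #
        #   import multiprocess as mp
        #   cond, flag = mp.Condition(), mp.Value('i', 0)
        #   with cond:
        #       ok = cond.wait_for(lambda: flag.value == 1, timeout=0.1)
        #   # ok is False here, since nothing ever sets flag.value to 1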
        for i in range(3):
            time.sleep(0.010)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertTrue(success.value)

    @classmethod
    def _test_wait_result(cls, c, pid):
        with c:
            c.notify()
        time.sleep(1)
        if pid is not None:
            os.kill(pid, signal.SIGINT)

    def test_wait_result(self):
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            self.assertTrue(c.wait(60))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 60)

        p.join()


class _TestEvent(BaseTestCase):

    @classmethod
    def _test_event(cls, event):
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()

    def test_repr(self) -> None:
        event = self.Event()
        if self.TYPE == 'processes':
            self.assertRegex(repr(event), r"<Event at .* unset>")
            event.set()
            self.assertRegex(repr(event), r"<Event at .* set>")
            event.clear()
            self.assertRegex(repr(event), r"<Event at .* unset>")
        elif self.TYPE == 'manager':
            self.assertRegex(repr(event), r"<EventProxy object, typeid 'Event' at .*")
            event.set()
            self.assertRegex(repr(event), r"<EventProxy object, typeid 'Event' at .*")


class _TestConnection(BaseTestCase):

    ALLOWED_TYPES = ('processes', 'threads')

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32",
                     "test semantics don't make sense on Windows")
    @unittest.skipIf(MAXFD <= 256,
                     "largest assignable fd number is too small")
    @unittest.skipUnless(hasattr(os, "dup2"),
                         "test needs os.dup2()")
    def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657)
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "wb") as f:
            fd = f.fileno()
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
        p.join()
        with open(os_helper.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar")

    @classmethod
    def _send_data_without_fd(self, conn):
        os.write(conn.fileno(), b"\0")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
    def test_missing_fd_transfer(self):
        # Check that exception is raised when received data is not
        # accompanied by a file descriptor in ancillary data.
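        # Illustrative sketch (not part of the original test): the fd-passing
        # helpers exercised in this class come from multiprocess.reduction.
        # Rough hypothetical usage between two processes joined by a Pipe:
        #
        #   from multiprocess import reduction
        #   reduction.send_handle(conn, fd, destination_pid)   # sending side
        #   received_fd = reduction.recv_handle(conn)           # receiving side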
if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. 
conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @hashlib_helper.requires_hashdigest('sha256') class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=support.LONG_TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((socket_helper.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = 
lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). 
# Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") @hashlib_helper.requires_hashdigest('sha256') class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def _new_shm_name(self, prefix): # Add a PID to the name of a POSIX shared memory object to allow # running multiprocessing tests (test_multiprocessing_fork, # test_multiprocessing_spawn, etc) in parallel. return prefix + str(os.getpid()) def test_shared_memory_basics(self): name_tsmb = self._new_shm_name('test01_tsmb') sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, name_tsmb) self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Verify __repr__ self.assertIn(sms.name, str(sms)) self.assertIn(str(sms.size), str(sms)) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory(name_tsmb) self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. 
same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. same_sms.close() # Creating Shared Memory Segment with -ve size with self.assertRaises(ValueError): shared_memory.SharedMemory(create=True, size=-2) # Attaching Shared Memory Segment without a name with self.assertRaises(ValueError): shared_memory.SharedMemory(create=False) # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. name_dblunlink = self._new_shm_name('test01_dblunlink') sms_uno = shared_memory.SharedMemory( name_dblunlink, create=True, size=5000 ) with self.assertRaises(FileNotFoundError): try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory(name_dblunlink) sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( name_tsmb, create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. 
sms.close() @unittest.skipIf(True, "fails with dill >= 0.3.5") def test_shared_memory_recreate(self): # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) def test_invalid_shared_memory_cration(self): # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_pickle_unpickle(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) sms.buf[0:6] = b'pickle' # Test pickling pickled_sms = pickle.dumps(sms, protocol=proto) # Test unpickling sms2 = pickle.loads(pickled_sms) self.assertIsInstance(sms2, shared_memory.SharedMemory) self.assertEqual(sms.name, sms2.name) self.assertEqual(bytes(sms.buf[0:6]), b'pickle') self.assertEqual(bytes(sms2.buf[0:6]), b'pickle') # Test that unpickled version is still the same SharedMemory sms.buf[0:6] = b'newval' self.assertEqual(bytes(sms.buf[0:6]), b'newval') self.assertEqual(bytes(sms2.buf[0:6]), b'newval') sms2.buf[0:6] = b'oldval' self.assertEqual(bytes(sms.buf[0:6]), b'oldval') self.assertEqual(bytes(sms2.buf[0:6]), b'oldval') def test_shared_memory_pickle_unpickle_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) sms.buf[0:6] = b'pickle' pickled_sms = pickle.dumps(sms, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sms.close() sms.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(pickled_sms) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocessing tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. 
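        # Illustrative sketch (not part of the original test): the create/attach
        # pattern used in this test, in brief. Hypothetical standalone usage:
        #
        #   from multiprocess import shared_memory
        #   shm = shared_memory.SharedMemory(create=True, size=16)
        #   same = shared_memory.SharedMemory(name=shm.name)   # attach by name
        #   same.buf[:5] = b'howdy'
        #   bytes(shm.buf[:5])                                  # -> b'howdy'
        #   same.close(); shm.close(); shm.unlink()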
p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' #XXX: ensure correct resource_tracker rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. 
absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify __repr__ self.assertIn(sl.shm.name, str(sl)) self.assertIn(str(list(sl)), str(sl)) # Index Out of Range (get) with self.assertRaises(IndexError): sl[7] # Index Out of Range (set) with self.assertRaises(IndexError): sl[7] = 2 # Assign value without format change (str -> str) current_format = sl._get_packing_format(0) sl[0] = 'howdy' self.assertEqual(current_format, sl._get_packing_format(0)) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. name_duplicate = self._new_shm_name('test03_duplicate') sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual(name_duplicate, sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. 
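        # Illustrative sketch (not part of the original test): ShareableList in
        # brief. Hypothetical standalone usage:
        #
        #   from multiprocess import shared_memory
        #   sl = shared_memory.ShareableList([1, 'two', 3.0])
        #   other = shared_memory.ShareableList(name=sl.shm.name)  # same memory
        #   other[0] = 99
        #   sl[0]                                                   # -> 99
        #   other.shm.close(); sl.shm.close(); sl.shm.unlink()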
empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl, protocol=proto) deserialized_sl = pickle.loads(serialized_sl) self.assertIsInstance( deserialized_sl, shared_memory.ShareableList) self.assertEqual(deserialized_sl[-1], 9) self.assertIsNot(sl, deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") sl[3] = "newvalue" self.assertEqual(deserialized_sl[3], "newvalue") larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto) self.assertEqual(len(serialized_sl), len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_ShareableList_pickling_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) serialized_sl = pickle.dumps(sl, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sl.shm.close() sl.shm.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(serialized_sl) def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. p.terminate() p.wait() err_msg = ("A SharedMemory segment was leaked after " "a process was abruptly terminated") for _ in support.sleeping_retry(support.LONG_TIMEOUT, err_msg): try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break if os.name == 'posix': # Without this line it was raising warnings like: # UserWarning: resource_tracker: # There appear to be 1 leaked shared_memory # objects to clean up at shutdown # See: https://bugs.python.org/issue45209 resource_tracker.unregister(f"/{name}", "shared_memory") # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_untracking(self): # gh-82300: When a separate Python process accesses shared memory # with track=False, it must not cause the memory to be deleted # when terminating. 
cmd = '''if 1: import sys from multiprocessing.shared_memory import SharedMemory mem = SharedMemory(create=False, name=sys.argv[1], track=False) mem.close() ''' mem = shared_memory.SharedMemory(create=True, size=10) # The resource tracker shares pipes with the subprocess, and so # err existing means that the tracker process has terminated now. try: rc, out, err = script_helper.assert_python_ok("-c", cmd, mem.name) self.assertNotIn(b"resource_tracker", err) self.assertEqual(rc, 0) mem2 = shared_memory.SharedMemory(create=False, name=mem.name) mem2.close() finally: try: mem.unlink() except OSError: pass mem.close() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_tracking(self): # gh-82300: When a separate Python process accesses shared memory # with track=True, it must cause the memory to be deleted when # terminating. cmd = '''if 1: import sys from multiprocessing.shared_memory import SharedMemory mem = SharedMemory(create=False, name=sys.argv[1], track=True) mem.close() ''' mem = shared_memory.SharedMemory(create=True, size=10) try: rc, out, err = script_helper.assert_python_ok("-c", cmd, mem.name) self.assertEqual(rc, 0) self.assertIn( b"resource_tracker: There appear to be 1 leaked " b"shared_memory objects to clean up at shutdown", err) finally: try: mem.unlink() except OSError: pass resource_tracker.unregister(mem._name, "shared_memory") mem.close() # # Test to verify that `Finalize` works. # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): gc.collect() # For PyPy or other GCs. self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a gc.collect() # For PyPy or other GCs. b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called gc.collect() # For PyPy or other GCs. 
c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) @support.requires_resource('cpu') def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with threading_helper.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' 
+ m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) def test_filename(self): logger = multiprocessing.get_logger() original_level = logger.level try: logger.setLevel(util.DEBUG) stream = io.StringIO() handler = logging.StreamHandler(stream) logging_format = '[%(levelname)s] [%(filename)s] %(message)s' handler.setFormatter(logging.Formatter(logging_format)) logger.addHandler(handler) logger.info('1') util.info('2') logger.debug('3') filename = os.path.basename(__file__) log_record = stream.getvalue() self.assertIn(f'[INFO] [{filename}] 1', log_record) self.assertIn(f'[INFO] [{filename}] 2', log_record) self.assertIn(f'[DEBUG] [{filename}] 3', log_record) finally: logger.setLevel(original_level) logger.removeHandler(handler) handler.close() # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = 
self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): pass finally: # Hack private attribute _handle to avoid printing an error # in conn.__del__ conn._handle = None self.assertRaises((ValueError, OSError), multiprocessing.connection.Connection, -1) @hashlib_helper.requires_hashdigest('sha256') class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection._CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') @hashlib_helper.requires_hashdigest('md5') @hashlib_helper.requires_hashdigest('sha256') class ChallengeResponseTest(unittest.TestCase): authkey = b'supadupasecretkey' def create_response(self, message): return multiprocessing.connection._create_response( self.authkey, message ) def verify_challenge(self, message, response): return multiprocessing.connection._verify_challenge( self.authkey, message, response ) def test_challengeresponse(self): for algo in [None, "md5", "sha256"]: with self.subTest(f"{algo=}"): msg = b'is-twenty-bytes-long' # The length of a legacy message. if algo: prefix = b'{%s}' % algo.encode("ascii") else: prefix = b'' msg = prefix + msg response = self.create_response(msg) if not response.startswith(prefix): self.fail(response) self.verify_challenge(msg, response) # TODO(gpshead): We need integration tests for handshakes between modern # deliver_challenge() and verify_response() code and connections running a # test-local copy of the legacy Python <=3.11 implementations. # TODO(gpshead): properly annotate tests for requires_hashdigest rather than # only running these on a platform supporting everything. otherwise logic # issues preventing it from working on FIPS mode setups will be hidden. 
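#
# Illustrative sketch (not part of the original test suite): the challenge/
# response helpers exercised above are what back the public ``authkey``
# argument of multiprocess.connection.Listener/Client.  The helper below is a
# minimal, hedged example of that documented API -- the address, the key
# value and the use of a worker thread are arbitrary choices made purely for
# illustration, and nothing in the test suite calls this function.
#

def _example_authkey_handshake():
    from multiprocess.connection import Listener, Client
    import threading

    authkey = b'supadupasecretkey'      # same style of key as ChallengeResponseTest
    with Listener(('localhost', 0), authkey=authkey) as listener:
        address = listener.address      # OS-assigned port

        def _client():
            # With an authkey, Client() takes part in the mutual
            # challenge/response handshake before the connection is usable.
            with Client(address, authkey=authkey) as conn:
                conn.send('hello')

        t = threading.Thread(target=_client)
        t.start()
        # accept() runs the matching side of the handshake; a mismatched key
        # on either end raises multiprocess.AuthenticationError instead.
        with listener.accept() as conn:
            assert conn.recv() == 'hello'
        t.join()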
# # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 @hashlib_helper.requires_hashdigest('sha256') class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() self.mgr.join() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() m.join() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocessing.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random() * 0.100) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random() * 0.100) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.create_server((socket_helper.HOST, 0)) addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in 
range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) @support.requires_resource('walltime') def test_wait_timeout(self): from multiprocess.connection import wait timeout = 5.0 # seconds a, b = multiprocessing.Pipe() start = time.monotonic() res = wait([a, b], timeout) delta = time.monotonic() - start self.assertEqual(res, []) self.assertGreater(delta, timeout - CLOCK_RES) b.send(None) res = wait([a, b], 20) self.assertEqual(res, [a]) @classmethod def signal_and_sleep(cls, sem, period): sem.release() time.sleep(period) @support.requires_resource('walltime') def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocessing.Semaphore(0) a, b = multiprocessing.Pipe() p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) self.assertTrue(sem.acquire(timeout=20)) start = time.monotonic() res = wait([a, p.sentinel, b], expected + 20) delta = time.monotonic() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocessing.Pipe() t = time.monotonic() res = wait([a], timeout=-1) t = time.monotonic() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls, start_method): import json mp = multiprocessing.get_context(start_method) r, w = mp.Pipe(duplex=False) p = mp.Process(target=cls.run_in_grandchild, args=(w,)) with warnings.catch_warnings(category=DeprecationWarning): p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json # start child process using unusual flags prog = ( 'from multiprocess.tests import TestFlags; ' f'TestFlags.run_in_child({multiprocessing.get_start_method()!r})' ) data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) 
self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. @classmethod def child(cls, n, conn): if n > 1: p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() join_process(p) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocessing.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocessing.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() join_process(p) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. 
fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocessing.Pipe() fd = self.get_high_socket_fd() try: p = multiprocessing.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() join_process(p) finally: self.close(fd) writer.close() reader.close() if multiprocessing.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, ctx.get_start_method()) def test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocessing.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def test_context_check_module_types(self): try: ctx = 
multiprocessing.get_context('forkserver') except ValueError: raise unittest.SkipTest('forkserver should be available') with self.assertRaisesRegex(TypeError, 'module_names must be a list of strings'): ctx.set_forkserver_preload([1, 2, 3]) def test_set_get(self): multiprocessing.set_forkserver_preload(PRELOAD) count = 0 old_method = multiprocessing.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocessing.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocessing.get_start_method(), method) ctx = multiprocessing.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocessing) count += 1 finally: multiprocessing.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocessing.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['spawn', 'fork'] or methods == ['fork', 'spawn', 'forkserver'] or methods == ['spawn', 'fork', 'forkserver']) def test_preload_resources(self): if multiprocessing.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "Only Spawn on windows so no risk of mixing") @only_run_in_spawn_testsuite("avoids redundant testing.") def test_mixed_startmethod(self): # Fork-based locks cannot be used with spawned process for process_method in ["spawn", "forkserver"]: queue = multiprocessing.get_context("fork").Queue() process_ctx = multiprocessing.get_context(process_method) p = process_ctx.Process(target=close_queue, args=(queue,)) err_msg = "A SemLock created in a fork" with self.assertRaisesRegex(RuntimeError, err_msg): p.start() # non-fork-based locks can be used with all other start methods for queue_method in ["spawn", "forkserver"]: for process_method in multiprocessing.get_all_start_methods(): queue = multiprocessing.get_context(queue_method).Queue() process_ctx = multiprocessing.get_context(process_method) p = process_ctx.Process(target=close_queue, args=(queue,)) p.start() p.join() @classmethod def _put_one_in_queue(cls, queue): queue.put(1) @classmethod def _put_two_and_nest_once(cls, queue): queue.put(2) process = multiprocessing.Process(target=cls._put_one_in_queue, args=(queue,)) process.start() process.join() def test_nested_startmethod(self): # gh-108520: Regression test to ensure that child process can send its # arguments to another process queue = multiprocessing.Queue() process = multiprocessing.Process(target=self._put_two_and_nest_once, args=(queue,)) process.start() process.join() results = [] while not queue.empty(): results.append(queue.get()) # gh-109706: queue.put(1) can write into the queue before queue.put(2), # there is no synchronization in the test. 
self.assertSetEqual(set(results), set([2, 1])) @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestResourceTracker(unittest.TestCase): def _test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os import multiprocess as mp from multiprocess import resource_tracker from multiprocess.shared_memory import SharedMemory mp.set_start_method("spawn") def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() err_msg = (f"A {rtype} resource was leaked after a process was " f"abruptly terminated") for _ in support.sleeping_retry(support.SHORT_TIMEOUT, err_msg): try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, but OSX seems to give # EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) def check_resource_tracker_death(self, signum, should_die): # bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) support.wait_process(pid, exitcode=-signal.SIGKILL) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. 
self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) def test_too_long_name_resource(self): # gh-96819: Resource names that will make the length of a write to a pipe # greater than PIPE_BUF are not allowed rtype = "shared_memory" too_long_name_resource = "a" * (512 - len(rtype)) with self.assertRaises(ValueError): resource_tracker.register(too_long_name_resource, rtype) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() def test_close(self): queue = multiprocessing.SimpleQueue() queue.close() # closing a queue twice should not fail queue.close() # Test specific to CPython since it tests private attributes @test.support.cpython_only def test_closed(self): queue = multiprocessing.SimpleQueue() queue.close() self.assertTrue(queue._reader.closed) self.assertTrue(queue._writer.closed) class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: During pool creation, if we can't create a process, # don't leak already created ones. will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) @hashlib_helper.requires_hashdigest('sha256') class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. 
In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert o[1] == 6 """ manager_class = multiprocessing.managers.SyncManager def setUp(self): self.manager = self.manager_class() self.manager.start() self.proc = None def tearDown(self): if self.proc is not None and self.proc.is_alive(): self.proc.terminate() self.proc.join() self.manager.shutdown() self.manager = None self.proc = None @classmethod def setUpClass(cls): support.reap_children() tearDownClass = setUpClass def wait_proc_exit(self): # Only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395). join_process(self.proc) timeout = WAIT_ACTIVE_CHILDREN_TIMEOUT start_time = time.monotonic() for _ in support.sleeping_retry(timeout, error=False): if len(multiprocessing.active_children()) <= 1: break else: dt = time.monotonic() - start_time support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt:.1f} seconds") def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) self.run_worker(self._test_pool, o) @classmethod def _test_queue(cls, obj): assert obj.qsize() == 2 assert obj.full() assert not obj.empty() assert obj.get() == 5 assert not obj.empty() assert obj.get() == 6 assert obj.empty() def test_queue(self, qname="Queue"): o = getattr(self.manager, qname)(2) o.put(5) o.put(6) self.run_worker(self._test_queue, o) assert o.empty() assert not o.full() def test_joinable_queue(self): self.test_queue("JoinableQueue") @classmethod def _test_list(cls, obj): case = 
unittest.TestCase() case.assertEqual(obj[0], 5) case.assertEqual(obj.count(5), 1) case.assertEqual(obj.index(5), 0) obj.sort() obj.reverse() for x in obj: pass case.assertEqual(len(obj), 1) case.assertEqual(obj.pop(0), 5) def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) self.assertIsNotNone(o) self.assertEqual(len(o), 0) @classmethod def _test_dict(cls, obj): case = unittest.TestCase() case.assertEqual(len(obj), 1) case.assertEqual(obj['foo'], 5) case.assertEqual(obj.get('foo'), 5) case.assertListEqual(list(obj.items()), [('foo', 5)]) case.assertListEqual(list(obj.keys()), ['foo']) case.assertListEqual(list(obj.values()), [5]) case.assertDictEqual(obj.copy(), {'foo': 5}) case.assertTupleEqual(obj.popitem(), ('foo', 5)) def test_dict(self): o = self.manager.dict() o['foo'] = 5 self.run_worker(self._test_dict, o) self.assertIsNotNone(o) self.assertEqual(len(o), 0) @classmethod def _test_value(cls, obj): case = unittest.TestCase() case.assertEqual(obj.value, 1) case.assertEqual(obj.get(), 1) obj.set(2) def test_value(self): o = self.manager.Value('i', 1) self.run_worker(self._test_value, o) self.assertEqual(o.value, 2) self.assertEqual(o.get(), 2) @classmethod def _test_array(cls, obj): case = unittest.TestCase() case.assertEqual(obj[0], 0) case.assertEqual(obj[1], 1) case.assertEqual(len(obj), 2) case.assertListEqual(list(obj), [0, 1]) def test_array(self): o = self.manager.Array('i', [0, 1]) self.run_worker(self._test_array, o) @classmethod def _test_namespace(cls, obj): case = unittest.TestCase() case.assertEqual(obj.x, 0) case.assertEqual(obj.y, 1) def test_namespace(self): o = self.manager.Namespace() o.x = 0 o.y = 1 self.run_worker(self._test_namespace, o) class TestNamedResource(unittest.TestCase): @unittest.skipIf(True, "ModuleNotFoundError") #XXX: since only_run_in_spawn @only_run_in_spawn_testsuite("spawn specific test.") def test_global_named_resource_spawn(self): # # gh-90549: Check that global named resources in main module # will not leak by a subprocess, in spawn context. # testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) with open(testfn, 'w', encoding='utf-8') as f: f.write(textwrap.dedent('''\ import multiprocess as mp ctx = mp.get_context('spawn') global_resource = ctx.Semaphore() def submain(): pass if __name__ == '__main__': p = ctx.Process(target=submain) p.start() p.join() ''')) rc, out, err = script_helper.assert_python_ok(testfn) # on error, err = 'UserWarning: resource_tracker: There appear to # be 1 leaked semaphore objects to clean up at shutdown' self.assertFalse(err, msg=err.decode('utf-8')) class MiscTestCase(unittest.TestCase): def test__all__(self): # Just make sure names in not_exported are excluded support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, not_exported=['SUBDEBUG', 'SUBWARNING', 'license', 'citation']) @unittest.skipIf(True, "ModuleNotFoundError") #XXX: since only_run_in_spawn @only_run_in_spawn_testsuite("avoids redundant testing.") def test_spawn_sys_executable_none_allows_import(self): # Regression test for a bug introduced in # https://github.com/python/cpython/issues/90876 that caused an # ImportError in multiprocessing when sys.executable was None. # This can be true in embedded environments. rc, out, err = script_helper.assert_python_ok( "-c", """if 1: import sys sys.executable = None assert "multiprocess" not in sys.modules, "already imported!" 
import multiprocess as multiprocessing import multiprocess.spawn # This should not fail\n""", ) self.assertEqual(rc, 0) self.assertFalse(err, msg=err.decode('utf-8')) # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocessing.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) if processes: test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocessing.Process connection = multiprocessing.connection current_process = staticmethod(multiprocessing.current_process) parent_process = staticmethod(multiprocessing.parent_process) active_children = staticmethod(multiprocessing.active_children) set_executable = staticmethod(multiprocessing.set_executable) Pool = staticmethod(multiprocessing.Pool) Pipe = staticmethod(multiprocessing.Pipe) Queue = staticmethod(multiprocessing.Queue) JoinableQueue = staticmethod(multiprocessing.JoinableQueue) Lock = staticmethod(multiprocessing.Lock) RLock = staticmethod(multiprocessing.RLock) Semaphore = staticmethod(multiprocessing.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) Condition = staticmethod(multiprocessing.Condition) Event = staticmethod(multiprocessing.Event) Barrier = staticmethod(multiprocessing.Barrier) Value = staticmethod(multiprocessing.Value) Array = staticmethod(multiprocessing.Array) RawValue = staticmethod(multiprocessing.RawValue) RawArray = staticmethod(multiprocessing.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocessing.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocessing.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) timeout = WAIT_ACTIVE_CHILDREN_TIMEOUT start_time = time.monotonic() for _ in support.sleeping_retry(timeout, error=False): if len(multiprocessing.active_children()) <= 1: break else: dt = time.monotonic() - start_time 
support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt:.1f} seconds") gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. test.support.environment_altered = True support.print_warning('Shared objects which still exist ' 'at manager shutdown:') support.print_warning(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocessing.dummy.Process connection = multiprocessing.dummy.connection current_process = staticmethod(multiprocessing.dummy.current_process) active_children = staticmethod(multiprocessing.dummy.active_children) Pool = staticmethod(multiprocessing.dummy.Pool) Pipe = staticmethod(multiprocessing.dummy.Pipe) Queue = staticmethod(multiprocessing.dummy.Queue) JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) Lock = staticmethod(multiprocessing.dummy.Lock) RLock = staticmethod(multiprocessing.dummy.RLock) Semaphore = staticmethod(multiprocessing.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) Condition = staticmethod(multiprocessing.dummy.Condition) Event = staticmethod(multiprocessing.dummy.Event) Barrier = staticmethod(multiprocessing.dummy.Barrier) Value = staticmethod(multiprocessing.dummy.Value) Array = staticmethod(multiprocessing.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method, only_type=None, exclude_types=False): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: if only_type and type_ != only_type: continue if exclude_types: continue newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass if type_ == 'manager': Temp = hashlib_helper.requires_hashdigest('sha256')(Temp) Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): if only_type: continue class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = __module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocessing.set_forkserver_preload(PRELOAD) multiprocessing.process._cleanup() dangling[0] = multiprocessing.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " "see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates temp directory multiprocessing.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = 
False # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() multiprocessing.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocessing.process._dangling) - set(dangling[0]) if processes: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocessing.util._cleanup_tests() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule @unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available') @unittest.skipIf(sys.platform != "linux", "Linux only") class SemLockTests(unittest.TestCase): def test_semlock_subclass(self): class SemLock(_multiprocessing.SemLock): pass name = f'test_semlock_subclass-{os.getpid()}' s = SemLock(1, 0, 10, name, False) _multiprocessing.sem_unlink(name) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/__main__.py000066400000000000000000000015701455552142400262450ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + '__init__.py') + \ glob.glob(suite + os.path.sep + '*' + os.path.sep + '__init__.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/mp_fork_bomb.py000066400000000000000000000007001455552142400271530ustar00rootroot00000000000000import multiprocessing, sys def foo(): print("123") # Because "if __name__ == '__main__'" is missing this will not work # correctly on Windows. However, we should get a RuntimeError rather # than the Windows equivalent of a fork bomb. 
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) else: multiprocessing.set_start_method('spawn') p = multiprocessing.Process(target=foo) p.start() p.join() sys.exit(p.exitcode) uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/mp_preload.py000066400000000000000000000005551455552142400266510ustar00rootroot00000000000000import multiprocessing multiprocessing.Lock() def f(): print("ok") if __name__ == "__main__": ctx = multiprocessing.get_context("forkserver") modname = "multiprocess.tests.mp_preload" # Make sure it's importable __import__(modname) ctx.set_forkserver_preload([modname]) proc = ctx.Process(target=f) proc.start() proc.join() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_fork/000077500000000000000000000000001455552142400314575ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_fork/__init__.py000066400000000000000000000014751455552142400335770ustar00rootroot00000000000000import os.path import sys import unittest from test import support import glob import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("fork is not available on Windows") if sys.platform == 'darwin': raise unittest.SkipTest("test may crash on macOS (bpo-33725)") suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) test_manager.py000066400000000000000000000003021455552142400344160ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_forkimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', only_type="manager") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_fork/test_misc.py000066400000000000000000000003011455552142400340150ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', exclude_types=True) if __name__ == '__main__': unittest.main() test_processes.py000066400000000000000000000003041455552142400350140ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_forkimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', only_type="processes") if __name__ == '__main__': unittest.main() test_threads.py000066400000000000000000000003021455552142400344360ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_forkimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'fork', only_type="threads") if __name__ == '__main__': unittest.main() 
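#
# Illustrative sketch (not part of the original file): each of these tiny
# modules only asks multiprocess.tests to synthesize its TestCase classes for
# one start-method/helper-type combination.  The helper below shows, under
# that assumption, what the equivalent call for another start method might
# look like; it is never invoked here and the 'spawn' choice is only an
# example.
#

def _example_variant(module_globals, start_method='spawn'):
    # install_tests_in_module_dict() copies the shared test cases from
    # multiprocess.tests into the given module namespace, specialised for
    # ``start_method`` and restricted here to the thread-backed helpers.
    from multiprocess.tests import install_tests_in_module_dict
    install_tests_in_module_dict(module_globals, start_method,
                                 only_type="threads")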
uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_forkserver/000077500000000000000000000000001455552142400327065ustar00rootroot00000000000000__init__.py000066400000000000000000000013421455552142400347400ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_forkserverimport os.path import sys import unittest from test import support import glob import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("forkserver is not available on Windows") suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) test_manager.py000066400000000000000000000003101455552142400356440ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', only_type="manager") if __name__ == '__main__': unittest.main() test_misc.py000066400000000000000000000003071455552142400351730ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', exclude_types=True) if __name__ == '__main__': unittest.main() test_processes.py000066400000000000000000000003121455552142400362420ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', only_type="processes") if __name__ == '__main__': unittest.main() test_threads.py000066400000000000000000000003101455552142400356640ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_forkserverimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'forkserver', only_type="threads") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_main_handling.py000066400000000000000000000271071455552142400336670ustar00rootroot00000000000000# tests __main__ module handling in multiprocessing from test import support from test.support import import_helper # Skip tests if _multiprocessing wasn't built. import_helper.import_module('_multiprocessing') import importlib import importlib.machinery import unittest import sys import os import os.path import py_compile from test.support import os_helper from test.support.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, assert_python_ok) if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") # Look up which start methods are available to test import multiprocess as multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) # Issue #22332: Skip tests if sem_open implementation is broken. 
import_helper.import_module('multiprocess.synchronize') verbose = support.verbose test_source = """\ # multiprocessing includes all sorts of shenanigans to make __main__ # attributes accessible in the subprocess in a pickle compatible way. # We run the "doesn't work in the interactive interpreter" example from # the docs to make sure it *does* work from an executed __main__, # regardless of the invocation mechanism import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method from test import support # We use this __main__ defined function in the map call below in order to # check that multiprocessing in correctly running the unguarded # code in child processes and then making it available as __main__ def f(x): return x*x # Check explicit relative imports if "check_sibling" in __file__: # We're inside a package and not in a __main__.py file # so make sure explicit relative imports work correctly from . import sibling if __name__ == '__main__': start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(f, [1, 2, 3], callback=results.extend) # up to 1 min to report the results for _ in support.sleeping_retry(support.LONG_TIMEOUT, "Timed out waiting for results"): if results: break results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) test_source_main_skipped_in_children = """\ # __main__.py files have an implied "if __name__ == '__main__'" so # multiprocessing should always skip running them in child processes # This means we can't use __main__ defined functions in child processes, # so we just use "int" as a passthrough operation below if __name__ != "__main__": raise RuntimeError("Should only be called as __main__!") import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method from test import support start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(int, [1, 4, 9], callback=results.extend) # up to 1 min to report the results for _ in support.sleeping_retry(support.LONG_TIMEOUT, "Timed out waiting for results"): if results: break results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) # These helpers were copied from test_cmd_line_script & tweaked a bit... def _make_test_script(script_dir, script_basename, source=test_source, omit_suffix=False): to_return = make_script(script_dir, script_basename, source, omit_suffix) # Hack to check explicit relative imports if script_basename == "check_sibling": make_script(script_dir, "sibling", "") importlib.invalidate_caches() return to_return def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source=test_source, depth=1): to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth) importlib.invalidate_caches() return to_return # There's no easy way to pass the script directory in to get # -m to work (avoiding that is the whole point of making # directories and zipfiles executable!) 
# So we fake it for testing purposes with a custom launch script launch_source = """\ import sys, os.path, runpy sys.path.insert(0, %s) runpy._run_module_as_main(%r) """ def _make_launch_script(script_dir, script_basename, module_name, path=None): if path is None: path = "os.path.dirname(__file__)" else: path = repr(path) source = launch_source % (path, module_name) to_return = make_script(script_dir, script_basename, source) importlib.invalidate_caches() return to_return class MultiProcessingCmdLineMixin(): maxDiff = None # Show full tracebacks on subprocess failure def setUp(self): if self.start_method not in AVAILABLE_START_METHODS: self.skipTest("%r start method not available" % self.start_method) def _check_output(self, script_name, exit_code, out, err): if verbose > 1: print("Output from test script %r:" % script_name) print(repr(out)) self.assertEqual(exit_code, 0) self.assertEqual(err.decode('utf-8'), '') expected_results = "%s -> [1, 4, 9]" % self.start_method self.assertEqual(out.decode('utf-8').strip(), expected_results) def _check_script(self, script_name, *cmd_line_switches): if not __debug__: cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) run_args = cmd_line_switches + (script_name, self.start_method) rc, out, err = assert_python_ok(*run_args, __isolated=False) self._check_output(script_name, rc, out, err) def test_basic_script(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') self._check_script(script_name) def test_basic_script_no_suffix(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script', omit_suffix=True) self._check_script(script_name) def test_ipython_workaround(self): # Some versions of the IPython launch script are missing the # __name__ = "__main__" guard, and multiprocessing has long had # a workaround for that case # See https://github.com/ipython/ipython/issues/4698 source = test_source_main_skipped_in_children with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'ipython', source=source) self._check_script(script_name) script_no_suffix = _make_test_script(script_dir, 'ipython', source=source, omit_suffix=True) self._check_script(script_no_suffix) def test_script_compiled(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(pyc_file) def test_directory(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) self._check_script(script_dir) def test_directory_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(script_dir) def test_zipfile(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) self._check_script(zip_name) def test_zipfile_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) compiled_name = 
py_compile.compile(script_name, doraise=True) zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) self._check_script(zip_name) def test_module_in_package(self): with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, 'check_sibling') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.check_sibling') self._check_script(launch_name) def test_module_in_package_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) self._check_script(launch_name) def test_module_in_subpackage_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) self._check_script(launch_name) def test_package(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) def test_package_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) compiled_name = py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) # Test all supported start methods (setupClass skips as appropriate) class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'spawn' main_in_children_source = test_source_main_skipped_in_children class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'fork' main_in_children_source = test_source class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'forkserver' main_in_children_source = test_source_main_skipped_in_children def tearDownModule(): support.reap_children() if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_spawn/000077500000000000000000000000001455552142400316465ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_spawn/__init__.py000066400000000000000000000011771455552142400337650ustar00rootroot00000000000000import os.path import sys import unittest from test import support import glob import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) 
test_manager.py000066400000000000000000000003031455552142400346060ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_spawnimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', only_type="manager") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_spawn/test_misc.py000066400000000000000000000003021455552142400342050ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', exclude_types=True) if __name__ == '__main__': unittest.main() test_processes.py000066400000000000000000000003051455552142400352040ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_spawnimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', only_type="processes") if __name__ == '__main__': unittest.main() test_threads.py000066400000000000000000000003031455552142400346260ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/tests/test_multiprocessing_spawnimport unittest from multiprocess.tests import install_tests_in_module_dict install_tests_in_module_dict(globals(), 'spawn', only_type="threads") if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.13/multiprocess/util.py000066400000000000000000000333531455552142400243440ustar00rootroot00000000000000# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import itertools import sys import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocess' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args, stacklevel=2) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args, stacklevel=2) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args, stacklevel=2) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args, stacklevel=2) def get_logger(): ''' Returns logger used by multiprocess ''' global _logger import logging with logging._lock: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # Abstract socket support def _platform_supports_abstract_sockets(): if sys.platform == "linux": return True if hasattr(sys, 'getandroidapilevel'): return True return False def is_abstract_socket_namespace(address): if not address: return False if isinstance(address, bytes): return address[0] == 0 elif isinstance(address, str): return address[0] == "\0" raise TypeError(f'address type of {address!r} unrecognized') abstract_sockets_supported = _platform_supports_abstract_sockets() # # Function returning a temp directory which will be removed on exit # def _remove_temp_dir(rmtree, tempdir): rmtree(tempdir) current_process = process.current_process() # current_process() can be None if the finalizer is called # late during Python finalization if current_process is not None: current_process._config['tempdir'] = None def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up tempdir = process.current_process()._config.get('tempdir') if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) # keep a strong reference to shutil.rmtree(), since the finalizer # can be called late during Python shutdown Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), exitpriority=-100) process.current_process()._config['tempdir'] = tempdir return tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = 
{} _finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): if (exitpriority is not None) and not isinstance(exitpriority,int): raise TypeError( "Exitpriority ({0!r}) must be None or int, not {1!s}".format( exitpriority, type(exitpriority))) if obj is not None: self._weakref = weakref.ref(obj, self) elif exitpriority is None: raise ValueError("Without object, exitpriority cannot be None") self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<%s object, dead>' % self.__class__.__name__ x = '<%s object, callback=%s' % ( self.__class__.__name__, getattr(self._callback, '__name__', self._callback)) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitpriority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0] is not None else: f = lambda p : p[0] is not None and p[0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). # list(_finalizer_registry) should be atomic, while # list(_finalizer_registry.items()) is not. 
keys = [key for key in list(_finalizer_registry) if f(key)] keys.sort(reverse=True) for key in keys: finalizer = _finalizer_registry.get(key) # key may have been removed from the registry if finalizer is not None: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=process.active_children, current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) def _at_fork_reinit(self): self._lock._at_fork_reinit() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () # # Close fds except those specified # try: MAXFD = os.sysconf("SC_OPEN_MAX") except Exception: MAXFD = 256 def close_all_fds_except(fds): fds = list(fds) + [-1, MAXFD] fds.sort() assert fds[-1] == MAXFD, 'fd too large' for i in range(len(fds) - 1): os.closerange(fds[i]+1, fds[i+1]) # # Close sys.stdin and replace stdin with os.devnull # def _close_stdin(): if sys.stdin is None: return try: sys.stdin.close() except (OSError, ValueError): pass try: fd = os.open(os.devnull, os.O_RDONLY) try: sys.stdin = open(fd, encoding="utf-8", closefd=False) except: os.close(fd) raise except (OSError, ValueError): pass # # Flush standard streams, if any # def _flush_std_streams(): try: sys.stdout.flush() except (AttributeError, ValueError): pass try: sys.stderr.flush() except (AttributeError, ValueError): pass # # Start a program with only specified fds kept open # def spawnv_passfds(path, args, passfds): import _posixsubprocess import subprocess passfds = tuple(sorted(map(int, passfds))) errpipe_read, errpipe_write = os.pipe() 
try: return _posixsubprocess.fork_exec( args, [path], True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, -1, None, None, None, -1, None, subprocess._USE_VFORK) finally: os.close(errpipe_read) os.close(errpipe_write) def close_fds(*fds): """Close each file descriptor given as an argument""" for fd in fds: os.close(fd) def _cleanup_tests(): """Cleanup multiprocessing resources when multiprocessing tests completed.""" from test import support # cleanup multiprocessing process._cleanup() # Stop the ForkServer process if it's running from multiprocess import forkserver forkserver._forkserver._stop() # Stop the ResourceTracker process if it's running from multiprocess import resource_tracker resource_tracker._resource_tracker._stop() # bpo-37421: Explicitly call _run_finalizers() to remove immediately # temporary directories created by multiprocessing.util.get_temp_dir(). _run_finalizers() support.gc_collect() support.reap_children() uqfoundation-multiprocess-b3457a5/py3.8/000077500000000000000000000000001455552142400202015ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/Modules/000077500000000000000000000000001455552142400216115ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/Modules/_multiprocess/000077500000000000000000000000001455552142400245015ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/Modules/_multiprocess/clinic/000077500000000000000000000000001455552142400257425ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/Modules/_multiprocess/clinic/posixshmem.c.h000066400000000000000000000077751455552142400305500ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(HAVE_SHM_OPEN) PyDoc_STRVAR(_posixshmem_shm_open__doc__, "shm_open($module, /, path, flags, mode=511)\n" "--\n" "\n" "Open a shared memory object. Returns a file descriptor (integer)."); #define _POSIXSHMEM_SHM_OPEN_METHODDEF \ {"shm_open", (PyCFunction)(void(*)(void))_posixshmem_shm_open, METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_open__doc__}, static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode); static PyObject * _posixshmem_shm_open(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; static const char * const _keywords[] = {"path", "flags", "mode", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "shm_open", 0}; PyObject *argsbuf[3]; Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 2; PyObject *path; int flags; int mode = 511; int _return_value; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 2, 3, 0, argsbuf); if (!args) { goto exit; } if (!PyUnicode_Check(args[0])) { _PyArg_BadArgument("shm_open", "argument 'path'", "str", args[0]); goto exit; } if (PyUnicode_READY(args[0]) == -1) { goto exit; } path = args[0]; if (PyFloat_Check(args[1])) { PyErr_SetString(PyExc_TypeError, "integer argument expected, got float" ); goto exit; } flags = _PyLong_AsInt(args[1]); if (flags == -1 && PyErr_Occurred()) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (PyFloat_Check(args[2])) { PyErr_SetString(PyExc_TypeError, "integer argument expected, got float" ); goto exit; } mode = _PyLong_AsInt(args[2]); if (mode == -1 && PyErr_Occurred()) { goto exit; } skip_optional_pos: _return_value = _posixshmem_shm_open_impl(module, path, flags, mode); if ((_return_value == -1) && PyErr_Occurred()) { goto exit; } return_value = PyLong_FromLong((long)_return_value); exit: return return_value; } #endif /* defined(HAVE_SHM_OPEN) */ #if defined(HAVE_SHM_UNLINK) PyDoc_STRVAR(_posixshmem_shm_unlink__doc__, "shm_unlink($module, /, path)\n" "--\n" "\n" "Remove a shared memory object (similar to unlink()).\n" "\n" "Remove a shared memory object name, and, once all processes have unmapped\n" "the object, de-allocates and destroys the contents of the associated memory\n" "region."); #define _POSIXSHMEM_SHM_UNLINK_METHODDEF \ {"shm_unlink", (PyCFunction)(void(*)(void))_posixshmem_shm_unlink, METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_unlink__doc__}, static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path); static PyObject * _posixshmem_shm_unlink(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; static const char * const _keywords[] = {"path", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "shm_unlink", 0}; PyObject *argsbuf[1]; PyObject *path; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf); if (!args) { goto exit; } if (!PyUnicode_Check(args[0])) { _PyArg_BadArgument("shm_unlink", "argument 'path'", "str", args[0]); goto exit; } if (PyUnicode_READY(args[0]) == -1) { goto exit; } path = args[0]; return_value = _posixshmem_shm_unlink_impl(module, path); exit: return return_value; } #endif /* defined(HAVE_SHM_UNLINK) */ #ifndef _POSIXSHMEM_SHM_OPEN_METHODDEF #define _POSIXSHMEM_SHM_OPEN_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_OPEN_METHODDEF) */ #ifndef _POSIXSHMEM_SHM_UNLINK_METHODDEF #define _POSIXSHMEM_SHM_UNLINK_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_UNLINK_METHODDEF) */ /*[clinic end generated code: output=9132861c61d8c2d8 input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.8/Modules/_multiprocess/multiprocess.c000066400000000000000000000125431455552142400274030ustar00rootroot00000000000000/* * Extension module used by multiprocessing package * * multiprocessing.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. 
*/ #include "multiprocess.h" /* * Function which raises exceptions based on error codes */ PyObject * _PyMp_SetError(PyObject *Type, int num) { switch (num) { #ifdef MS_WINDOWS case MP_STANDARD_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, 0); break; case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, WSAGetLastError()); break; #else /* !MS_WINDOWS */ case MP_STANDARD_ERROR: case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetFromErrno(Type); break; #endif /* !MS_WINDOWS */ case MP_MEMORY_ERROR: PyErr_NoMemory(); break; case MP_EXCEPTION_HAS_BEEN_SET: break; default: PyErr_Format(PyExc_RuntimeError, "unknown error number %d", num); } return NULL; } #ifdef MS_WINDOWS static PyObject * multiprocessing_closesocket(PyObject *self, PyObject *args) { HANDLE handle; int ret; if (!PyArg_ParseTuple(args, F_HANDLE ":closesocket" , &handle)) return NULL; Py_BEGIN_ALLOW_THREADS ret = closesocket((SOCKET) handle); Py_END_ALLOW_THREADS if (ret) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); Py_RETURN_NONE; } static PyObject * multiprocessing_recv(PyObject *self, PyObject *args) { HANDLE handle; int size, nread; PyObject *buf; if (!PyArg_ParseTuple(args, F_HANDLE "i:recv" , &handle, &size)) return NULL; buf = PyBytes_FromStringAndSize(NULL, size); if (!buf) return NULL; Py_BEGIN_ALLOW_THREADS nread = recv((SOCKET) handle, PyBytes_AS_STRING(buf), size, 0); Py_END_ALLOW_THREADS if (nread < 0) { Py_DECREF(buf); return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); } _PyBytes_Resize(&buf, nread); return buf; } static PyObject * multiprocessing_send(PyObject *self, PyObject *args) { HANDLE handle; Py_buffer buf; int ret, length; if (!PyArg_ParseTuple(args, F_HANDLE "y*:send" , &handle, &buf)) return NULL; length = (int)Py_MIN(buf.len, INT_MAX); Py_BEGIN_ALLOW_THREADS ret = send((SOCKET) handle, buf.buf, length, 0); Py_END_ALLOW_THREADS PyBuffer_Release(&buf); if (ret < 0) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); return PyLong_FromLong(ret); } #endif /* * Function table */ static PyMethodDef module_methods[] = { #ifdef MS_WINDOWS {"closesocket", multiprocessing_closesocket, METH_VARARGS, ""}, {"recv", multiprocessing_recv, METH_VARARGS, ""}, {"send", multiprocessing_send, METH_VARARGS, ""}, #endif #if !defined(POSIX_SEMAPHORES_NOT_ENABLED) && !defined(__ANDROID__) {"sem_unlink", _PyMp_sem_unlink, METH_VARARGS, ""}, #endif {NULL} }; /* * Initialize */ static struct PyModuleDef multiprocessing_module = { PyModuleDef_HEAD_INIT, "_multiprocess", NULL, -1, module_methods, NULL, NULL, NULL, NULL }; PyMODINIT_FUNC PyInit__multiprocess(void) { PyObject *module, *temp, *value = NULL; /* Initialize module */ module = PyModule_Create(&multiprocessing_module); if (!module) return NULL; #if defined(MS_WINDOWS) || \ (defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED)) /* Add _PyMp_SemLock type to module */ if (PyType_Ready(&_PyMp_SemLockType) < 0) return NULL; Py_INCREF(&_PyMp_SemLockType); { PyObject *py_sem_value_max; /* Some systems define SEM_VALUE_MAX as an unsigned value that * causes it to be negative when used as an int (NetBSD). * * Issue #28152: Use (0) instead of 0 to fix a warning on dead code * when using clang -Wunreachable-code. 
*/ if ((int)(SEM_VALUE_MAX) < (0)) py_sem_value_max = PyLong_FromLong(INT_MAX); else py_sem_value_max = PyLong_FromLong(SEM_VALUE_MAX); if (py_sem_value_max == NULL) return NULL; PyDict_SetItemString(_PyMp_SemLockType.tp_dict, "SEM_VALUE_MAX", py_sem_value_max); } PyModule_AddObject(module, "SemLock", (PyObject*)&_PyMp_SemLockType); #endif /* Add configuration macros */ temp = PyDict_New(); if (!temp) return NULL; #define ADD_FLAG(name) \ value = Py_BuildValue("i", name); \ if (value == NULL) { Py_DECREF(temp); return NULL; } \ if (PyDict_SetItemString(temp, #name, value) < 0) { \ Py_DECREF(temp); Py_DECREF(value); return NULL; } \ Py_DECREF(value) #if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) ADD_FLAG(HAVE_SEM_OPEN); #endif #ifdef HAVE_SEM_TIMEDWAIT ADD_FLAG(HAVE_SEM_TIMEDWAIT); #endif #ifdef HAVE_BROKEN_SEM_GETVALUE ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE); #endif #ifdef HAVE_BROKEN_SEM_UNLINK ADD_FLAG(HAVE_BROKEN_SEM_UNLINK); #endif if (PyModule_AddObject(module, "flags", temp) < 0) return NULL; return module; } uqfoundation-multiprocess-b3457a5/py3.8/Modules/_multiprocess/multiprocess.h000066400000000000000000000045241455552142400274100ustar00rootroot00000000000000#ifndef MULTIPROCESS_H #define MULTIPROCESS_H #define PY_SSIZE_T_CLEAN #include "Python.h" #include "structmember.h" #include "pythread.h" /* * Platform includes and definitions */ #ifdef MS_WINDOWS # define WIN32_LEAN_AND_MEAN # include # include # include /* getpid() */ # ifdef Py_DEBUG # include # endif # define SEM_HANDLE HANDLE # define SEM_VALUE_MAX LONG_MAX #else # include /* O_CREAT and O_EXCL */ # if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) # include typedef sem_t *SEM_HANDLE; # endif # define HANDLE int # define SOCKET int # define BOOL int # define UINT32 uint32_t # define INT32 int32_t # define TRUE 1 # define FALSE 0 # define INVALID_HANDLE_VALUE (-1) #endif /* * Issue 3110 - Solaris does not define SEM_VALUE_MAX */ #ifndef SEM_VALUE_MAX #if defined(HAVE_SYSCONF) && defined(_SC_SEM_VALUE_MAX) # define SEM_VALUE_MAX sysconf(_SC_SEM_VALUE_MAX) #elif defined(_SEM_VALUE_MAX) # define SEM_VALUE_MAX _SEM_VALUE_MAX #elif defined(_POSIX_SEM_VALUE_MAX) # define SEM_VALUE_MAX _POSIX_SEM_VALUE_MAX #else # define SEM_VALUE_MAX INT_MAX #endif #endif /* * Format codes */ #if SIZEOF_VOID_P == SIZEOF_LONG # define F_POINTER "k" # define T_POINTER T_ULONG #elif SIZEOF_VOID_P == SIZEOF_LONG_LONG # define F_POINTER "K" # define T_POINTER T_ULONGLONG #else # error "can't find format code for unsigned integer of same size as void*" #endif #ifdef MS_WINDOWS # define F_HANDLE F_POINTER # define T_HANDLE T_POINTER # define F_SEM_HANDLE F_HANDLE # define T_SEM_HANDLE T_HANDLE # define F_DWORD "k" # define T_DWORD T_ULONG #else # define F_HANDLE "i" # define T_HANDLE T_INT # define F_SEM_HANDLE F_POINTER # define T_SEM_HANDLE T_POINTER #endif /* * Error codes which can be returned by functions called without GIL */ #define MP_SUCCESS (0) #define MP_STANDARD_ERROR (-1) #define MP_MEMORY_ERROR (-1001) #define MP_SOCKET_ERROR (-1002) #define MP_EXCEPTION_HAS_BEEN_SET (-1003) PyObject *_PyMp_SetError(PyObject *Type, int num); /* * Externs - not all will really exist on all platforms */ extern PyTypeObject _PyMp_SemLockType; extern PyObject *_PyMp_sem_unlink(PyObject *ignore, PyObject *args); #endif /* MULTIPROCESS_H */ uqfoundation-multiprocess-b3457a5/py3.8/Modules/_multiprocess/posixshmem.c000066400000000000000000000060071455552142400270440ustar00rootroot00000000000000/* posixshmem - A Python 
extension that provides shm_open() and shm_unlink() */ #define PY_SSIZE_T_CLEAN #include #include "structmember.h" // for shm_open() and shm_unlink() #ifdef HAVE_SYS_MMAN_H #include #endif /*[clinic input] module _posixshmem [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=a416734e49164bf8]*/ /* * * Module-level functions & meta stuff * */ #ifdef HAVE_SHM_OPEN /*[clinic input] _posixshmem.shm_open -> int path: unicode flags: int mode: int = 0o777 # "shm_open(path, flags, mode=0o777)\n\n\ Open a shared memory object. Returns a file descriptor (integer). [clinic start generated code]*/ static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode) /*[clinic end generated code: output=8d110171a4fa20df input=e83b58fa802fac25]*/ { int fd; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return -1; } do { Py_BEGIN_ALLOW_THREADS fd = shm_open(name, flags, mode); Py_END_ALLOW_THREADS } while (fd < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (fd < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return -1; } return fd; } #endif /* HAVE_SHM_OPEN */ #ifdef HAVE_SHM_UNLINK /*[clinic input] _posixshmem.shm_unlink path: unicode Remove a shared memory object (similar to unlink()). Remove a shared memory object name, and, once all processes have unmapped the object, de-allocates and destroys the contents of the associated memory region. [clinic start generated code]*/ static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path) /*[clinic end generated code: output=42f8b23d134b9ff5 input=8dc0f87143e3b300]*/ { int rv; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return NULL; } do { Py_BEGIN_ALLOW_THREADS rv = shm_unlink(name); Py_END_ALLOW_THREADS } while (rv < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (rv < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return NULL; } Py_RETURN_NONE; } #endif /* HAVE_SHM_UNLINK */ #include "clinic/posixshmem.c.h" static PyMethodDef module_methods[ ] = { _POSIXSHMEM_SHM_OPEN_METHODDEF _POSIXSHMEM_SHM_UNLINK_METHODDEF {NULL} /* Sentinel */ }; static struct PyModuleDef this_module = { PyModuleDef_HEAD_INIT, // m_base "_posixshmem", // m_name "POSIX shared memory module", // m_doc -1, // m_size (space allocated for module globals) module_methods, // m_methods }; /* Module init function */ PyMODINIT_FUNC PyInit__posixshmem(void) { PyObject *module; module = PyModule_Create(&this_module); if (!module) { return NULL; } return module; } uqfoundation-multiprocess-b3457a5/py3.8/Modules/_multiprocess/semaphore.c000066400000000000000000000452541455552142400266420ustar00rootroot00000000000000/* * A type which wraps a semaphore * * semaphore.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocess.h" enum { RECURSIVE_MUTEX, SEMAPHORE }; typedef struct { PyObject_HEAD SEM_HANDLE handle; unsigned long last_tid; int count; int maxvalue; int kind; char *name; } SemLockObject; #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) #ifdef MS_WINDOWS /* * Windows definitions */ #define SEM_FAILED NULL #define SEM_CLEAR_ERROR() SetLastError(0) #define SEM_GET_LAST_ERROR() GetLastError() #define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL) #define SEM_CLOSE(sem) (CloseHandle(sem) ? 
0 : -1) #define SEM_GETVALUE(sem, pval) _GetSemaphoreValue(sem, pval) #define SEM_UNLINK(name) 0 static int _GetSemaphoreValue(HANDLE handle, long *value) { long previous; switch (WaitForSingleObjectEx(handle, 0, FALSE)) { case WAIT_OBJECT_0: if (!ReleaseSemaphore(handle, 1, &previous)) return MP_STANDARD_ERROR; *value = previous + 1; return 0; case WAIT_TIMEOUT: *value = 0; return 0; default: return MP_STANDARD_ERROR; } } static PyObject * semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds) { int blocking = 1; double timeout; PyObject *timeout_obj = Py_None; DWORD res, full_msecs, nhandles; HANDLE handles[2], sigint_event; static char *kwlist[] = {"block", "timeout", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist, &blocking, &timeout_obj)) return NULL; /* calculate timeout */ if (!blocking) { full_msecs = 0; } else if (timeout_obj == Py_None) { full_msecs = INFINITE; } else { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; timeout *= 1000.0; /* convert to millisecs */ if (timeout < 0.0) { timeout = 0.0; } else if (timeout >= 0.5 * INFINITE) { /* 25 days */ PyErr_SetString(PyExc_OverflowError, "timeout is too large"); return NULL; } full_msecs = (DWORD)(timeout + 0.5); } /* check whether we already own the lock */ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } /* check whether we can acquire without releasing the GIL and blocking */ if (WaitForSingleObjectEx(self->handle, 0, FALSE) == WAIT_OBJECT_0) { self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; } /* prepare list of handles */ nhandles = 0; handles[nhandles++] = self->handle; if (_PyOS_IsMainThread()) { sigint_event = _PyOS_SigintEvent(); assert(sigint_event != NULL); handles[nhandles++] = sigint_event; } else { sigint_event = NULL; } /* do the wait */ Py_BEGIN_ALLOW_THREADS if (sigint_event != NULL) ResetEvent(sigint_event); res = WaitForMultipleObjectsEx(nhandles, handles, FALSE, full_msecs, FALSE); Py_END_ALLOW_THREADS /* handle result */ switch (res) { case WAIT_TIMEOUT: Py_RETURN_FALSE; case WAIT_OBJECT_0 + 0: self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; case WAIT_OBJECT_0 + 1: errno = EINTR; return PyErr_SetFromErrno(PyExc_OSError); case WAIT_FAILED: return PyErr_SetFromWindowsErr(0); default: PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or " "WaitForMultipleObjects() gave unrecognized " "value %u", res); return NULL; } } static PyObject * semlock_release(SemLockObject *self, PyObject *args) { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } if (!ReleaseSemaphore(self->handle, 1, NULL)) { if (GetLastError() == ERROR_TOO_MANY_POSTS) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } else { return PyErr_SetFromWindowsErr(0); } } --self->count; Py_RETURN_NONE; } #else /* !MS_WINDOWS */ /* * Unix definitions */ #define SEM_CLEAR_ERROR() #define SEM_GET_LAST_ERROR() 0 #define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val) #define SEM_CLOSE(sem) sem_close(sem) #define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval) #define SEM_UNLINK(name) sem_unlink(name) /* OS X 10.4 defines SEM_FAILED as -1 instead of (sem_t *)-1; this gives compiler warnings, and (potentially) undefined behaviour. 
*/ #ifdef __APPLE__ # undef SEM_FAILED # define SEM_FAILED ((sem_t *)-1) #endif #ifndef HAVE_SEM_UNLINK # define sem_unlink(name) 0 #endif // ifndef HAVE_SEM_TIMEDWAIT # define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save) static int sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) { int res; unsigned long delay, difference; struct timeval now, tvdeadline, tvdelay; errno = 0; tvdeadline.tv_sec = deadline->tv_sec; tvdeadline.tv_usec = deadline->tv_nsec / 1000; for (delay = 0 ; ; delay += 1000) { /* poll */ if (sem_trywait(sem) == 0) return 0; else if (errno != EAGAIN) return MP_STANDARD_ERROR; /* get current time */ if (gettimeofday(&now, NULL) < 0) return MP_STANDARD_ERROR; /* check for timeout */ if (tvdeadline.tv_sec < now.tv_sec || (tvdeadline.tv_sec == now.tv_sec && tvdeadline.tv_usec <= now.tv_usec)) { errno = ETIMEDOUT; return MP_STANDARD_ERROR; } /* calculate how much time is left */ difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 + (tvdeadline.tv_usec - now.tv_usec); /* check delay not too long -- maximum is 20 msecs */ if (delay > 20000) delay = 20000; if (delay > difference) delay = difference; /* sleep */ tvdelay.tv_sec = delay / 1000000; tvdelay.tv_usec = delay % 1000000; if (select(0, NULL, NULL, NULL, &tvdelay) < 0) return MP_STANDARD_ERROR; /* check for signals */ Py_BLOCK_THREADS res = PyErr_CheckSignals(); Py_UNBLOCK_THREADS if (res) { errno = EINTR; return MP_EXCEPTION_HAS_BEEN_SET; } } } // endif /* !HAVE_SEM_TIMEDWAIT */ static PyObject * semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds) { int blocking = 1, res, err = 0; double timeout; PyObject *timeout_obj = Py_None; struct timespec deadline = {0}; struct timeval now; long sec, nsec; static char *kwlist[] = {"block", "timeout", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist, &blocking, &timeout_obj)) return NULL; if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } if (timeout_obj != Py_None) { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; if (timeout < 0.0) timeout = 0.0; if (gettimeofday(&now, NULL) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } sec = (long) timeout; nsec = (long) (1e9 * (timeout - sec) + 0.5); deadline.tv_sec = now.tv_sec + sec; deadline.tv_nsec = now.tv_usec * 1000 + nsec; deadline.tv_sec += (deadline.tv_nsec / 1000000000); deadline.tv_nsec %= 1000000000; } /* Check whether we can acquire without releasing the GIL and blocking */ do { res = sem_trywait(self->handle); err = errno; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); errno = err; if (res < 0 && errno == EAGAIN && blocking) { /* Couldn't acquire immediately, need to block */ do { Py_BEGIN_ALLOW_THREADS if (timeout_obj == Py_None) { res = sem_wait(self->handle); } else { res = sem_timedwait(self->handle, &deadline); } Py_END_ALLOW_THREADS err = errno; if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); } if (res < 0) { errno = err; if (errno == EAGAIN || errno == ETIMEDOUT) Py_RETURN_FALSE; else if (errno == EINTR) return NULL; else return PyErr_SetFromErrno(PyExc_OSError); } ++self->count; self->last_tid = PyThread_get_thread_ident(); Py_RETURN_TRUE; } static PyObject * semlock_release(SemLockObject *self, PyObject *args) { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if 
(self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } else { #ifdef HAVE_BROKEN_SEM_GETVALUE /* We will only check properly the maxvalue == 1 case */ if (self->maxvalue == 1) { /* make sure that already locked */ if (sem_trywait(self->handle) < 0) { if (errno != EAGAIN) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } /* it is already locked as expected */ } else { /* it was not locked so undo wait and raise */ if (sem_post(self->handle) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } PyErr_SetString(PyExc_ValueError, "semaphore " "or lock released too many " "times"); return NULL; } } #else int sval; /* This check is not an absolute guarantee that the semaphore does not rise above maxvalue. */ if (sem_getvalue(self->handle, &sval) < 0) { return PyErr_SetFromErrno(PyExc_OSError); } else if (sval >= self->maxvalue) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } #endif } if (sem_post(self->handle) < 0) return PyErr_SetFromErrno(PyExc_OSError); --self->count; Py_RETURN_NONE; } #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject * newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, char *name) { SemLockObject *self; self = PyObject_New(SemLockObject, type); if (!self) return NULL; self->handle = handle; self->kind = kind; self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; return (PyObject*)self; } static PyObject * semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { SEM_HANDLE handle = SEM_FAILED; int kind, maxvalue, value, unlink; PyObject *result; char *name, *name_copy = NULL; static char *kwlist[] = {"kind", "value", "maxvalue", "name", "unlink", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiisi", kwlist, &kind, &value, &maxvalue, &name, &unlink)) return NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { PyErr_SetString(PyExc_ValueError, "unrecognized kind"); return NULL; } if (!unlink) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) { return PyErr_NoMemory(); } strcpy(name_copy, name); } SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; if (unlink && SEM_UNLINK(name) < 0) goto failure; result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; return result; failure: if (handle != SEM_FAILED) SEM_CLOSE(handle); PyMem_Free(name_copy); if (!PyErr_Occurred()) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); } return NULL; } static PyObject * semlock_rebuild(PyTypeObject *type, PyObject *args) { SEM_HANDLE handle; int kind, maxvalue; char *name, *name_copy = NULL; if (!PyArg_ParseTuple(args, F_SEM_HANDLE "iiz", &handle, &kind, &maxvalue, &name)) return NULL; if (name != NULL) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) return PyErr_NoMemory(); strcpy(name_copy, name); } #ifndef MS_WINDOWS if (name != NULL) { handle = sem_open(name, 0); if (handle == SEM_FAILED) { PyMem_Free(name_copy); return PyErr_SetFromErrno(PyExc_OSError); } } #endif return newsemlockobject(type, handle, kind, maxvalue, name_copy); } static void semlock_dealloc(SemLockObject* self) { if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); PyMem_Free(self->name); PyObject_Del(self); } static PyObject * semlock_count(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return 
PyLong_FromLong((long)self->count); } static PyObject * semlock_ismine(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { /* only makes sense for a lock */ return PyBool_FromLong(ISMINE(self)); } static PyObject * semlock_getvalue(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { #ifdef HAVE_BROKEN_SEM_GETVALUE PyErr_SetNone(PyExc_NotImplementedError); return NULL; #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); /* some posix implementations use negative numbers to indicate the number of waiting threads */ if (sval < 0) sval = 0; return PyLong_FromLong((long)sval); #endif } static PyObject * semlock_iszero(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { #ifdef HAVE_BROKEN_SEM_GETVALUE if (sem_trywait(self->handle) < 0) { if (errno == EAGAIN) Py_RETURN_TRUE; return _PyMp_SetError(NULL, MP_STANDARD_ERROR); } else { if (sem_post(self->handle) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); Py_RETURN_FALSE; } #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); return PyBool_FromLong((long)sval == 0); #endif } static PyObject * semlock_afterfork(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { self->count = 0; Py_RETURN_NONE; } /* * Semaphore methods */ static PyMethodDef semlock_methods[] = { {"acquire", (PyCFunction)(void(*)(void))semlock_acquire, METH_VARARGS | METH_KEYWORDS, "acquire the semaphore/lock"}, {"release", (PyCFunction)semlock_release, METH_NOARGS, "release the semaphore/lock"}, {"__enter__", (PyCFunction)(void(*)(void))semlock_acquire, METH_VARARGS | METH_KEYWORDS, "enter the semaphore/lock"}, {"__exit__", (PyCFunction)semlock_release, METH_VARARGS, "exit the semaphore/lock"}, {"_count", (PyCFunction)semlock_count, METH_NOARGS, "num of `acquire()`s minus num of `release()`s for this process"}, {"_is_mine", (PyCFunction)semlock_ismine, METH_NOARGS, "whether the lock is owned by this thread"}, {"_get_value", (PyCFunction)semlock_getvalue, METH_NOARGS, "get the value of the semaphore"}, {"_is_zero", (PyCFunction)semlock_iszero, METH_NOARGS, "returns whether semaphore has value zero"}, {"_rebuild", (PyCFunction)semlock_rebuild, METH_VARARGS | METH_CLASS, ""}, {"_after_fork", (PyCFunction)semlock_afterfork, METH_NOARGS, "rezero the net acquisition count after fork()"}, {NULL} }; /* * Member table */ static PyMemberDef semlock_members[] = { {"handle", T_SEM_HANDLE, offsetof(SemLockObject, handle), READONLY, ""}, {"kind", T_INT, offsetof(SemLockObject, kind), READONLY, ""}, {"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY, ""}, {"name", T_STRING, offsetof(SemLockObject, name), READONLY, ""}, {NULL} }; /* * Semaphore type */ PyTypeObject _PyMp_SemLockType = { PyVarObject_HEAD_INIT(NULL, 0) /* tp_name */ "_multiprocess.SemLock", /* tp_basicsize */ sizeof(SemLockObject), /* tp_itemsize */ 0, /* tp_dealloc */ (destructor)semlock_dealloc, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_doc */ "Semaphore/Mutex type", /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ semlock_methods, /* tp_members */ semlock_members, /* tp_getset */ 0, /* tp_base */ 0, 
/* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ semlock_new, }; /* * Function to unlink semaphore names */ PyObject * _PyMp_sem_unlink(PyObject *ignore, PyObject *args) { char *name; if (!PyArg_ParseTuple(args, "s", &name)) return NULL; if (SEM_UNLINK(name) < 0) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); return NULL; } Py_RETURN_NONE; } uqfoundation-multiprocess-b3457a5/py3.8/README_MODS000066400000000000000000001160461455552142400217130ustar00rootroot00000000000000cp -rf py3.7/examples . cp -rf py3.7/doc . cp -f py3.7/index.html . cp -rf py3.7/Modules/_multiprocess/* Modules/_multiprocess cp -f Python-3.8.0b1/Modules/multiprocessing/posixshmem.c Modules/_multiprocess cp -rf Python-3.8.0b1/Modules/multiprocessing/clinic Modules/_multiprocess cp -f Python-3.8.0b1/Lib/multiprocessing/* multiprocess cp -rf py3.7/multiprocess/dummy multiprocess cp -f Python-3.8.0b1/Lib/test/*test_multiprocessing*.py multiprocess/tests cp Python-3.8.8/Lib/test/mp_*py multiprocess/tests cp Python-3.8.8/Lib/test/_test_multiprocessing.py multiprocess/tests/__init__.py # ---------------------------------------------------------------------- diff Python-3.7.3/Modules/_multiprocessing/semaphore.c Python-3.8.0b1/Modules/_multiprocessing/semaphore.c 144c144 < "value %d", res); --- > "value %u", res); 524c524 < semlock_count(SemLockObject *self) --- > semlock_count(SemLockObject *self, PyObject *Py_UNUSED(ignored)) 530c530 < semlock_ismine(SemLockObject *self) --- > semlock_ismine(SemLockObject *self, PyObject *Py_UNUSED(ignored)) 537c537 < semlock_getvalue(SemLockObject *self) --- > semlock_getvalue(SemLockObject *self, PyObject *Py_UNUSED(ignored)) 555c555 < semlock_iszero(SemLockObject *self) --- > semlock_iszero(SemLockObject *self, PyObject *Py_UNUSED(ignored)) 576c576 < semlock_afterfork(SemLockObject *self) --- > semlock_afterfork(SemLockObject *self, PyObject *Py_UNUSED(ignored)) 587c587 < {"acquire", (PyCFunction)semlock_acquire, METH_VARARGS | METH_KEYWORDS, --- > {"acquire", (PyCFunction)(void(*)(void))semlock_acquire, METH_VARARGS | METH_KEYWORDS, 591c591 < {"__enter__", (PyCFunction)semlock_acquire, METH_VARARGS | METH_KEYWORDS, --- > {"__enter__", (PyCFunction)(void(*)(void))semlock_acquire, METH_VARARGS | METH_KEYWORDS, 636c636 < /* tp_print */ 0, --- > /* tp_vectorcall_offset */ 0, 639c639 < /* tp_reserved */ 0, --- > /* tp_as_async */ 0, diff Python-3.7.3/Lib/multiprocessing/dummy/__init__.py Python-3.8.0b1/Lib/multiprocessing/dummy/__init__.py 83c83 < def __init__(self, **kwds): --- > def __init__(self, /, **kwds): diff Python-3.8.0b1/Lib/multiprocessing/__init__.py py3.8/multiprocess/__init__.py 17a18,19 > __version__ = '0.70.8.dev0' > diff Python-3.8.0b1/Lib/multiprocessing/connection.py py3.8/multiprocess/connection.py 21c21,24 < import _multiprocessing --- > try: > import _multiprocess as _multiprocessing > except ImportError: > import _multiprocessing 60c63 < return time.monotonic() + timeout --- > return getattr(time,'monotonic',time.time)() + timeout 63c66 < return time.monotonic() > t --- > return getattr(time,'monotonic',time.time)() > t 927c930 < deadline = time.monotonic() + timeout --- > deadline = getattr(time,'monotonic',time.time)() + timeout 935c938 < timeout = deadline - time.monotonic() --- > timeout = deadline - getattr(time,'monotonic',time.time)() Common subdirectories: Python-3.8.0b1/Lib/multiprocessing/dummy and py3.8/multiprocess/dummy diff Python-3.8.0b1/Lib/multiprocessing/forkserver.py 
py3.8/multiprocess/forkserver.py 106c106 < cmd = ('from multiprocessing.forkserver import main; ' + --- > cmd = ('from multiprocess.forkserver import main; ' + diff Python-3.8.0b1/Lib/multiprocessing/managers.py py3.8/multiprocess/managers.py 500c500 < listener_client = { --- > listener_client = { #XXX: register dill? 1087c1087 < endtime = time.monotonic() + timeout --- > endtime = getattr(time,'monotonic',time.time)() + timeout 1093c1093 < waittime = endtime - time.monotonic() --- > waittime = endtime - getattr(time,'monotonic',time.time)() 1177c1177 < '__setitem__', 'clear', 'copy', 'get', 'items', --- > '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', 1218c1218 < The `multiprocessing.Manager()` function creates started instances of --- > The `multiprocess.Manager()` function creates started instances of diff Python-3.8.0b1/Lib/multiprocessing/popen_fork.py py3.8/multiprocess/popen_fork.py 43c43 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait diff Python-3.8.0b1/Lib/multiprocessing/popen_forkserver.py py3.8/multiprocess/popen_forkserver.py 63c63 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait diff Python-3.8.0b1/Lib/multiprocessing/queues.py py3.8/multiprocess/queues.py 22c22,25 < import _multiprocessing --- > try: > import _multiprocess as _multiprocessing > except ImportError: > import _multiprocessing 101c104 < deadline = time.monotonic() + timeout --- > deadline = getattr(time,'monotonic',time.time)() + timeout 106c109 < timeout = deadline - time.monotonic() --- > timeout = deadline - getattr(time,'monotonic',time.time)() diff Python-3.8.0b1/Lib/multiprocessing/reduction.py py3.8/multiprocess/reduction.py 15c15,18 < import pickle --- > try: > import dill as pickle > except ImportError: > import pickle 34c37 < '''Pickler subclass used by multiprocessing.''' --- > '''Pickler subclass used by multiprocess.''' 254c257 < used in multiprocessing.''' --- > used in multiprocess.''' diff Python-3.8.0b1/Lib/multiprocessing/resource_tracker.py py3.8/multiprocess/resource_tracker.py 37c37,40 < import _multiprocessing --- > try: > import _multiprocess as _multiprocessing > except ImportError: > import _multiprocessing 91c94 < cmd = 'from multiprocessing.resource_tracker import main;main(%d)' --- > cmd = 'from multiprocess.resource_tracker import main;main(%d)' diff Python-3.8.0b1/Lib/multiprocessing/spawn.py py3.8/multiprocess/spawn.py 86c86 < prog = 'from multiprocessing.spawn import spawn_main; spawn_main(%s)' --- > prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' diff Python-3.8.0b1/Lib/multiprocessing/synchronize.py py3.8/multiprocess/synchronize.py 17c17,20 < import _multiprocessing --- > try: > import _multiprocess as _multiprocessing > except ImportError: > import _multiprocessing 28,33c31,39 < from _multiprocessing import SemLock, sem_unlink < except (ImportError): < raise ImportError("This platform lacks a functioning sem_open" + < " implementation, therefore, the required" + < " synchronization primitives needed will not" + < " function, see issue 3770.") --- > from _multiprocess import SemLock, sem_unlink > except ImportError: > try: > from _multiprocessing import SemLock, sem_unlink > except (ImportError): > raise ImportError("This platform lacks a functioning sem_open" + > " implementation, therefore, the required" + > " synchronization primitives needed will not" + > " function, see issue 3770.") 304c310 < endtime = time.monotonic() + timeout --- > endtime = 
getattr(time,'monotonic',time.time)() + timeout 310c316 < waittime = endtime - time.monotonic() --- > waittime = endtime - getattr(time,'monotonic',time.time)() Only in py3.8/multiprocess: tests diff Python-3.8.0b1/Lib/multiprocessing/util.py py3.8/multiprocess/util.py 38c38 < LOGGER_NAME = 'multiprocessing' --- > LOGGER_NAME = 'multiprocess' 62c62 < Returns logger used by multiprocessing --- > Returns logger used by multiprocess diff Python-3.8.0b1/Lib/test/_test_multiprocessing.py ~/dev/svn/pathos/multiprocess/py3.8/multiprocess/tests/__init__.py 23c23 < import pickle --- > import pickle #XXX: use dill? 32c32 < _multiprocessing = test.support.import_module('_multiprocessing') --- > _multiprocessing = test.support.import_module('_multiprocess') 34c34 < test.support.import_module('multiprocessing.synchronize') --- > test.support.import_module('multiprocess.synchronize') 37,42c37,43 < import multiprocessing.connection < import multiprocessing.dummy < import multiprocessing.heap < import multiprocessing.managers < import multiprocessing.pool < import multiprocessing.queues --- > import multiprocess as multiprocessing > import multiprocess.connection > import multiprocess.dummy > import multiprocess.heap > import multiprocess.managers > import multiprocess.pool > import multiprocess.queues 44c45 < from multiprocessing import util --- > from multiprocess import util 47c48 < from multiprocessing import reduction --- > from multiprocess import reduction 53c54 < from multiprocessing.sharedctypes import Value, copy --- > from multiprocess.sharedctypes import Value, copy 59c60 < from multiprocessing import shared_memory --- > from multiprocess import shared_memory 93c94 < from multiprocessing import resource_tracker --- > from multiprocess import resource_tracker 121c122 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 134c135 < PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver'] --- > PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] 173c174 < t = time.monotonic() --- > t = getattr(time,'monotonic',time.time)() 177c178 < self.elapsed = time.monotonic() - t --- > self.elapsed = getattr(time,'monotonic',time.time)() - t 289c290 < from multiprocessing.process import parent_process --- > from multiprocess.process import parent_process 325c326 < from multiprocessing.process import parent_process --- > from multiprocess.process import parent_process 470a472 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 722c724 < from multiprocessing.forkserver import _forkserver --- > from multiprocess.forkserver import _forkserver 811c813 < self.assertIn("test_multiprocessing.py", err) --- > self.assertIn("__init__.py", err) 1092c1094 < import multiprocessing --- > import multiprocess as multiprocessing 1110c1112 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 1112c1114 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 1516c1518 < dt = time.monotonic() --- > dt = getattr(time,'monotonic',time.time)() 1518c1520 < dt = time.monotonic() - dt --- > dt = getattr(time,'monotonic',time.time)() - dt 1987c1989 < self.skipTest("requires multiprocessing.sharedctypes") --- > self.skipTest("requires multiprocess.sharedctypes") 2553a2556 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 2595a2599 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 2609c2613 < t_start = time.monotonic() --- > t_start = getattr(time,'monotonic',time.time)() 2621c2625 < 
self.assertGreater(time.monotonic() - t_start, 0.9) --- > self.assertGreater(getattr(time,'monotonic',time.time)() - t_start, 0.9) 2693,2694c2697,2698 < def test_unpickleable_result(self): < from multiprocessing.pool import MaybeEncodingError --- > def _test_unpickleable_result(self): > from multiprocess.pool import MaybeEncodingError 2764c2768 < from multiprocessing.managers import BaseManager, BaseProxy, RemoteError --- > from multiprocess.managers import BaseManager, BaseProxy, RemoteError 3390c3394 < from multiprocessing import resource_sharer --- > from multiprocess import resource_sharer 3832c3836 < from multiprocessing.managers import SharedMemoryManager --- > from multiprocess.managers import SharedMemoryManager 3989c3993 < from multiprocessing import shared_memory --- > from multiprocess import shared_memory 4007c4011 < deadline = time.monotonic() + 60 --- > deadline = getattr(time,'monotonic',time.time)() + 60 4009c4013 < while time.monotonic() < deadline: --- > while getattr(time,'monotonic',time.time)() < deadline: 4161,4163c4165,4167 < modules = ['multiprocessing.' + m for m in modules] < modules.remove('multiprocessing.__init__') < modules.append('multiprocessing') --- > modules = ['multiprocess.' + m for m in modules] > modules.remove('multiprocess.__init__') > modules.append('multiprocess') 4169,4171c4173,4175 < modules.remove('multiprocessing.popen_fork') < modules.remove('multiprocessing.popen_forkserver') < modules.remove('multiprocessing.popen_spawn_posix') --- > modules.remove('multiprocess.popen_fork') > modules.remove('multiprocess.popen_forkserver') > modules.remove('multiprocess.popen_spawn_posix') 4173c4177 < modules.remove('multiprocessing.popen_spawn_win32') --- > modules.remove('multiprocess.popen_spawn_win32') 4175c4179 < modules.remove('multiprocessing.popen_forkserver') --- > modules.remove('multiprocess.popen_forkserver') 4179c4183 < modules.remove('multiprocessing.sharedctypes') --- > modules.remove('multiprocess.sharedctypes') 4459c4463 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4499c4503 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4540c4544 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4545c4549 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4547c4551 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4555c4559 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4557c4561 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4568c4572 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4581c4585 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4583c4587 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4591c4595 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4593c4597 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4600c4604 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4602c4606 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4611c4615 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4613c4617 < t = time.monotonic() --- > t = 
getattr(time,'monotonic',time.time)() 4615c4619 < t = time.monotonic() - t --- > t = getattr(time,'monotonic',time.time)() - t 4662c4666 < prog = ('from test._test_multiprocessing import TestFlags; ' + --- > prog = ('from __init__ import TestFlags; ' + 4708c4712 < def test_noforkbomb(self): --- > def _test_noforkbomb(self): 4837c4841 < def test_ignore(self): --- > def _test_ignore(self): 4945c4949 < def test_preload_resources(self): --- > def _test_preload_resources(self): 4962c4966 < def test_resource_tracker(self): --- > def _test_resource_tracker(self): 4968,4970c4972,4974 < import multiprocessing as mp < from multiprocessing import resource_tracker < from multiprocessing.shared_memory import SharedMemory --- > import multiprocess as mp > from multiprocess import resource_tracker > from multiprocess.shared_memory import SharedMemory 5030c5034 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5078c5082 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5087c5091 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5231c5235 < start_time = time.monotonic() --- > start_time = getattr(time,'monotonic',time.time)() 5236c5240 < dt = time.monotonic() - start_time --- > dt = getattr(time,'monotonic',time.time)() - start_time 5239c5243 < print("Warning -- multiprocessing.Manager still has %s active " --- > print("Warning -- multiprocess.Manager still has %s active " 5510c5514 < start_time = time.monotonic() --- > start_time = getattr(time,'monotonic',time.time)() 5515c5519 < dt = time.monotonic() - start_time --- > dt = getattr(time,'monotonic',time.time)() - start_time 5518c5522 < print("Warning -- multiprocessing.Manager still has %s active " --- > print("Warning -- multiprocess.Manager still has %s active " diff Python-3.8.0b1/Lib/multiprocessing/popen_spawn_win32.py Python-3.8.0b2/Lib/multiprocessing/popen_spawn_win32.py 25,26c25 < WINENV = (hasattr(sys, '_base_executable') and < not _path_eq(sys.executable, sys._base_executable)) --- > WINENV = not _path_eq(sys.executable, sys._base_executable) diff Python-3.8.0b1/Modules/_multiprocessing/clinic/posixshmem.c.h Python-3.8.0b4/Modules/_multiprocessing/clinic/posixshmem.c.h 38c38 < _PyArg_BadArgument("shm_open", 1, "str", args[0]); --- > _PyArg_BadArgument("shm_open", "argument 'path'", "str", args[0]); 111c111 < _PyArg_BadArgument("shm_unlink", 1, "str", args[0]); --- > _PyArg_BadArgument("shm_unlink", "argument 'path'", "str", args[0]); 133c133 < /*[clinic end generated code: output=be42e23c18677c0f input=a9049054013a1b77]*/ --- > /*[clinic end generated code: output=9132861c61d8c2d8 input=a9049054013a1b77]*/ diff Python-3.8.0b1/Lib/multiprocessing/forkserver.py Python-3.8.0b4/Lib/multiprocessing/forkserver.py 41a42,60 > def _stop(self): > # Method used by unit tests to stop the server > with self._lock: > self._stop_unlocked() > > def _stop_unlocked(self): > if self._forkserver_pid is None: > return > > # close the "alive" file descriptor asks the server to stop > os.close(self._forkserver_alive_fd) > self._forkserver_alive_fd = None > > os.waitpid(self._forkserver_pid, 0) > self._forkserver_pid = None > > os.unlink(self._forkserver_address) > self._forkserver_address = None > diff Python-3.8.0b1/Lib/multiprocessing/util.py Python-3.8.0b4/Lib/multiprocessing/util.py 108a109,117 > def 
_remove_temp_dir(rmtree, tempdir): > rmtree(tempdir) > > current_process = process.current_process() > # current_process() can be None if the finalizer is called > # late during Python finalization > if current_process is not None: > current_process._config['tempdir'] = None > 116c125,128 < Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100) --- > # keep a strong reference to shutil.rmtree(), since the finalizer > # can be called late during Python shutdown > Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), > exitpriority=-100) # ---------------------------------------------------------------------- ADDED *args, **kwds for ForkingPickler in __init__, dump, and dumps # ---------------------------------------------------------------------- diff Python-3.8.0b4/Lib/multiprocessing/context.py py3.8/multiprocessing/context.py 312c312 < _default_context = DefaultContext(_concrete_contexts['spawn']) --- > _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn # ---------------------------------------------------------------------- diff Python-3.8.0b4/Lib/multiprocessing/popen_spawn_win32.py Python-3.8.1/Lib/multiprocessing/popen_spawn_win32.py 75c75 < env, None, False, 0, None, None, None) --- > None, None, False, 0, env, None, None) diff Python-3.8.0b4/Lib/multiprocessing/process.py Python-3.8.1/Lib/multiprocessing/process.py 303a304,305 > if threading._HAVE_THREAD_NATIVE_ID: > threading.main_thread()._set_native_id() diff Python-3.8.0b4/Lib/multiprocessing/resource_tracker.py Python-3.8.1/Lib/multiprocessing/resource_tracker.py 52a53,65 > def _stop(self): > with self._lock: > if self._fd is None: > # not running > return > > # closing the "alive" file descriptor stops main() > os.close(self._fd) > self._fd = None > > os.waitpid(self._pid, 0) > self._pid = None > diff Python-3.8.0b4/Lib/multiprocessing/spawn.py Python-3.8.1/Lib/multiprocessing/spawn.py 39c39 < _python_exe = sys.executable --- > _python_exe = sys._base_executable diff Python-3.8.0b4/Lib/multiprocessing/util.py Python-3.8.1/Lib/multiprocessing/util.py 241c241 < x += ', exitprority=' + str(self._key[0]) --- > x += ', exitpriority=' + str(self._key[0]) 441a442,466 > > > def _cleanup_tests(): > """Cleanup multiprocessing resources when multiprocessing tests > completed.""" > > from test import support > > # cleanup multiprocessing > process._cleanup() > > # Stop the ForkServer process if it's running > from multiprocessing import forkserver > forkserver._forkserver._stop() > > # Stop the ResourceTracker process if it's running > from multiprocessing import resource_tracker > resource_tracker._resource_tracker._stop() > > # bpo-37421: Explicitly call _run_finalizers() to remove immediately > # temporary directories created by multiprocessing.util.get_temp_dir(). 
> _run_finalizers() > support.gc_collect() > > support.reap_children() # ---------------------------------------------------------------------- diff Python-3.8.1/Lib/multiprocessing/connection.py Python-3.8.3/Lib/multiprocessing/connection.py 105c105 < elif type(address) is str: --- > elif type(address) is str or util.is_abstract_socket_namespace(address): 600c600,601 < if family == 'AF_UNIX': --- > if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): > # Linux abstract socket namespaces do not need to be explicitly unlinked diff Python-3.8.1/Lib/multiprocessing/forkserver.py Python-3.8.3/Lib/multiprocessing/forkserver.py 58c58,59 < os.unlink(self._forkserver_address) --- > if not util.is_abstract_socket_namespace(self._forkserver_address): > os.unlink(self._forkserver_address) 138c139,140 < os.chmod(address, 0o600) --- > if not util.is_abstract_socket_namespace(address): > os.chmod(address, 0o600) diff Python-3.8.1/Lib/multiprocessing/managers.py Python-3.8.3/Lib/multiprocessing/managers.py 62c62 < Type to uniquely indentify a shared object --- > Type to uniquely identify a shared object 824c824 < Try to call a method of the referrent and return a copy of the result --- > Try to call a method of the referent and return a copy of the result 1291a1292,1295 > address = self.address > # The address of Linux abstract namespaces can be bytes > if isinstance(address, bytes): > address = os.fsdecode(address) 1293c1297 < _SharedMemoryTracker(f"shmm_{self.address}_{getpid()}") --- > _SharedMemoryTracker(f"shm_{address}_{getpid()}") diff Python-3.8.1/Lib/multiprocessing/pool.py Python-3.8.3/Lib/multiprocessing/pool.py 654,655d653 < self._worker_handler._state = TERMINATE < self._change_notifier.put(None) 684a683,685 > # Notify that the worker_handler state has been changed so the > # _handle_workers loop can be unblocked (and exited) in order to > # send the finalization sentinel all the workers. 
685a687,688 > change_notifier.put(None) > diff Python-3.8.1/Lib/multiprocessing/shared_memory.py Python-3.8.3/Lib/multiprocessing/shared_memory.py 435a436 > encoded_value = value 437,438c438,441 < if len(value) > self._allocated_bytes[position]: < raise ValueError("exceeds available storage for existing str") --- > encoded_value = (value.encode(_encoding) > if isinstance(value, str) else value) > if len(encoded_value) > self._allocated_bytes[position]: > raise ValueError("bytes/str item exceeds available storage") 451,452c454 < value = value.encode(_encoding) if isinstance(value, str) else value < struct.pack_into(new_format, self.shm.buf, offset, value) --- > struct.pack_into(new_format, self.shm.buf, offset, encoded_value) diff Python-3.8.1/Lib/multiprocessing/spawn.py Python-3.8.3/Lib/multiprocessing/spawn.py 39c39 < _python_exe = sys._base_executable --- > _python_exe = sys.executable diff Python-3.8.1/Lib/multiprocessing/util.py Python-3.8.3/Lib/multiprocessing/util.py 104a105,127 > > # Abstract socket support > > def _platform_supports_abstract_sockets(): > if sys.platform == "linux": > return True > if hasattr(sys, 'getandroidapilevel'): > return True > return False > > > def is_abstract_socket_namespace(address): > if not address: > return False > if isinstance(address, bytes): > return address[0] == 0 > elif isinstance(address, str): > return address[0] == "\0" > raise TypeError('address type of {address!r} unrecognized') > > > abstract_sockets_supported = _platform_supports_abstract_sockets() # ---------------------------------------------------------------------- diff Python-3.8.3/Lib/multiprocessing/context.py Python-3.8.6/Lib/multiprocessing/context.py 259a260 > methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] 261,263c262,264 < return ['fork', 'spawn', 'forkserver'] < else: < return ['fork', 'spawn'] --- > methods.append('forkserver') > return methods > diff Python-3.8.3/Lib/multiprocessing/shared_memory.py Python-3.8.6/Lib/multiprocessing/shared_memory.py 77a78,79 > if size == 0: > raise ValueError("'size' must be a positive number different from zero") diff Python-3.8.3/Lib/multiprocessing/synchronize.py Python-3.8.6/Lib/multiprocessing/synchronize.py 273c273 < False), ('notify: Should not have been able to acquire' --- > False), ('notify: Should not have been able to acquire ' # ---------------------------------------------------------------------- diff Python-3.8.8/Lib/test/_test_multiprocessing.py multiprocess/tests/__init__.py 23c23 < import pickle --- > import pickle #XXX: use dill? 
34c34 < support.skip_if_broken_multiprocessing_synchronize() --- > test.support.import_module('multiprocess.synchronize') 37,42c37,43 < import multiprocessing.connection < import multiprocessing.dummy < import multiprocessing.heap < import multiprocessing.managers < import multiprocessing.pool < import multiprocessing.queues --- > import multiprocess as multiprocessing > import multiprocess.connection > import multiprocess.dummy > import multiprocess.heap > import multiprocess.managers > import multiprocess.pool > import multiprocess.queues 44c45 < from multiprocessing import util --- > from multiprocess import util 47c48 < from multiprocessing import reduction --- > from multiprocess import reduction 53c54 < from multiprocessing.sharedctypes import Value, copy --- > from multiprocess.sharedctypes import Value, copy 59c60 < from multiprocessing import shared_memory --- > from multiprocess import shared_memory 93c94 < from multiprocessing import resource_tracker --- > from multiprocess import resource_tracker 121c122 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 134c135 < PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver'] --- > PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] 173c174 < t = time.monotonic() --- > t = getattr(time,'monotonic',time.time)() 177c178 < self.elapsed = time.monotonic() - t --- > self.elapsed = getattr(time,'monotonic',time.time)() - t 289c290 < from multiprocessing.process import parent_process --- > from multiprocess.process import parent_process 292c293 < def test_parent_process(self): --- > def _test_parent_process(self): 325c326 < from multiprocessing.process import parent_process --- > from multiprocess.process import parent_process 492a494 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 744c746 < from multiprocessing.forkserver import _forkserver --- > from multiprocess.forkserver import _forkserver 833c835 < self.assertIn("test_multiprocessing.py", err) --- > self.assertIn("__init__.py", err) 1114c1116 < import multiprocessing --- > import multiprocess as multiprocessing 1132c1134 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 1134c1136 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 1537c1539 < dt = time.monotonic() --- > dt = getattr(time,'monotonic',time.time)() 1539c1541 < dt = time.monotonic() - dt --- > dt = getattr(time,'monotonic',time.time)() - dt 2008c2010 < self.skipTest("requires multiprocessing.sharedctypes") --- > self.skipTest("requires multiprocess.sharedctypes") 2574a2577 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 2616a2620 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 2630c2634 < t_start = time.monotonic() --- > t_start = getattr(time,'monotonic',time.time)() 2642c2646 < self.assertGreater(time.monotonic() - t_start, 0.9) --- > self.assertGreater(getattr(time,'monotonic',time.time)() - t_start, 0.9) 2714,2715c2718,2719 < def test_unpickleable_result(self): < from multiprocessing.pool import MaybeEncodingError --- > def _test_unpickleable_result(self): > from multiprocess.pool import MaybeEncodingError 2803c2807 < from multiprocessing.managers import BaseManager, BaseProxy, RemoteError --- > from multiprocess.managers import BaseManager, BaseProxy, RemoteError 3443c3447 < from multiprocessing import resource_sharer --- > from multiprocess import resource_sharer 3688c3692 < self.skipTest("requires multiprocessing.sharedctypes") --- > 
self.skipTest("requires multiprocess.sharedctypes") 3738c3742 < @unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory") --- > @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") 3844c3848 < # the failure when we run multiprocessing tests in parallel. --- > # the failure when we run multiprocess tests in parallel. 4086c4090 < deadline = time.monotonic() + 60 --- > deadline = getattr(time,'monotonic',time.time)() + 60 4088c4092 < while time.monotonic() < deadline: --- > while getattr(time,'monotonic',time.time)() < deadline: 4240,4242c4244,4246 < modules = ['multiprocessing.' + m for m in modules] < modules.remove('multiprocessing.__init__') < modules.append('multiprocessing') --- > modules = ['multiprocess.' + m for m in modules] > modules.remove('multiprocess.__init__') > modules.append('multiprocess') 4248,4250c4252,4254 < modules.remove('multiprocessing.popen_fork') < modules.remove('multiprocessing.popen_forkserver') < modules.remove('multiprocessing.popen_spawn_posix') --- > modules.remove('multiprocess.popen_fork') > modules.remove('multiprocess.popen_forkserver') > modules.remove('multiprocess.popen_spawn_posix') 4252c4256 < modules.remove('multiprocessing.popen_spawn_win32') --- > modules.remove('multiprocess.popen_spawn_win32') 4254c4258 < modules.remove('multiprocessing.popen_forkserver') --- > modules.remove('multiprocess.popen_forkserver') 4258c4262 < modules.remove('multiprocessing.sharedctypes') --- > modules.remove('multiprocess.sharedctypes') 4538c4542 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4578c4582 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4619c4623 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4624c4628 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4626c4630 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4634c4638 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4636c4640 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4647c4651 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4660c4664 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4662c4666 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4670c4674 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4672c4676 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4679c4683 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4681c4685 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4690c4694 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4692c4696 < t = time.monotonic() --- > t = getattr(time,'monotonic',time.time)() 4694c4698 < t = time.monotonic() - t --- > t = getattr(time,'monotonic',time.time)() - t 4738c4742 < def test_flags(self): --- > def _test_flags(self): 4741c4745 < prog = ('from test._test_multiprocessing import TestFlags; ' + --- > prog = ('from multiprocess.tests import TestFlags; ' + 5043c5047 < def test_resource_tracker(self): --- > def _test_resource_tracker(self): 5049,5051c5053,5055 < import multiprocessing as mp < from multiprocessing import 
resource_tracker < from multiprocessing.shared_memory import SharedMemory --- > import multiprocess as mp > from multiprocess import resource_tracker > from multiprocess.shared_memory import SharedMemory 5095,5096c5099,5100 < deadline = time.monotonic() + 60 < while time.monotonic() < deadline: --- > deadline = getattr(time,'monotonic',time.time)() + 60 > while getattr(time,'monotonic',time.time)() < deadline: 5120c5124 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5168c5172 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5177c5181 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5321c5325 < start_time = time.monotonic() --- > start_time = getattr(time,'monotonic',time.time)() 5326c5330 < dt = time.monotonic() - start_time --- > dt = getattr(time,'monotonic',time.time)() - start_time 5329c5333 < support.print_warning(f"multiprocessing.Manager still has " --- > support.print_warning(f"multiprocess.Manager still has " 5597c5601 < start_time = time.monotonic() --- > start_time = getattr(time,'monotonic',time.time)() 5602c5606 < dt = time.monotonic() - start_time --- > dt = getattr(time,'monotonic',time.time)() - start_time 5605c5609 < support.print_warning(f"multiprocessing.Manager still has " --- > support.print_warning(f"multiprocess.Manager still has " # ---------------------------------------------------------------------- $ diff Python-3.8.12/Lib/test/_test_multiprocessing.py Python-3.8.13/Lib/test/_test_multiprocessing.py 3751a3752,3757 > def _new_shm_name(self, prefix): > # Add a PID to the name of a POSIX shared memory object to allow > # running multiprocessing tests (test_multiprocessing_fork, > # test_multiprocessing_spawn, etc) in parallel. 
> return prefix + str(os.getpid()) > 3753c3759,3760 < sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512) --- > name_tsmb = self._new_shm_name('test01_tsmb') > sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) 3757c3764 < self.assertEqual(sms.name, 'test01_tsmb') --- > self.assertEqual(sms.name, name_tsmb) 3766c3773 < also_sms = shared_memory.SharedMemory('test01_tsmb') --- > also_sms = shared_memory.SharedMemory(name_tsmb) 3771c3778 < same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size) --- > same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) 3781a3789,3794 > name_dblunlink = self._new_shm_name('test01_dblunlink') > sms_uno = shared_memory.SharedMemory( > name_dblunlink, > create=True, > size=5000 > ) 3783,3788d3795 < sms_uno = shared_memory.SharedMemory( < 'test01_dblunlink', < create=True, < size=5000 < ) < 3792c3799 < sms_duo = shared_memory.SharedMemory('test01_dblunlink') --- > sms_duo = shared_memory.SharedMemory(name_dblunlink) 3804c3811 < 'test01_tsmb', --- > name_tsmb, 3818c3825 < ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb') --- > ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) 4006c4013,4014 < sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate') --- > name_duplicate = self._new_shm_name('test03_duplicate') > sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) 4009c4017 < self.assertEqual('test03_duplicate', sl_copy.shm.name) --- > self.assertEqual(name_duplicate, sl_copy.shm.name) uqfoundation-multiprocess-b3457a5/py3.8/_multiprocess/000077500000000000000000000000001455552142400230715ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/_multiprocess/__init__.py000066400000000000000000000005011455552142400251760ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE from _multiprocessing import * uqfoundation-multiprocess-b3457a5/py3.8/doc/000077500000000000000000000000001455552142400207465ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/doc/CHANGES.html000066400000000000000000001133431455552142400227110ustar00rootroot00000000000000 Changelog for processing

Changelog for processing

Changes in 0.52

  • On versions 0.50 and 0.51 Mac OSX Lock.release() would fail with OSError(errno.ENOSYS, "[Errno 78] Function not implemented"). This appears to be because on Mac OSX sem_getvalue() has not been implemented.

    Now sem_getvalue() is no longer needed. Unfortunately, however, on Mac OSX BoundedSemaphore() will not raise ValueError if it exceeds its initial value.

  • Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs.

  • The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias.

Changes in 0.51

  • In 0.50 processing.Value() and processing.sharedctypes.Value() were related but had different signatures, which was rather confusing.

    Now processing.sharedctypes.Value() has been renamed processing.sharedctypes.RawValue() and processing.sharedctypes.Value() is the same as processing.Value().

  • In version 0.50 sendfd() and recvfd() apparently did not work on 64bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40.

    However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of sendfd() and recvfd() by setting macros['HAVE_FD_TRANSFER'] = 0 in setup.py.

  • Fixed an authentication error when using a "remote" manager created using BaseManager.from_address().

  • Fixed a couple of bugs which only affected Python 2.4.

Changes in 0.50

  • ctypes is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately.

  • LocalManager() has been removed.

  • Added processing.Value() and processing.Array() which are similar to LocalManager.SharedValue() and LocalManager.SharedArray().

  • In the sharedctypes module new_value() and new_array() have been renamed Value() and Array().

  • Process.stop(), Process.getStoppable() and Process.setStoppable() have been removed. Use Process.terminate() instead.

  • processing.Lock now matches threading.Lock behaviour more closely: now a thread can release a lock it does not own, and now when a thread tries acquiring a lock it already owns, a deadlock results instead of an exception.

  • On Windows when the main thread is blocking on a method of Lock, RLock, Semaphore, BoundedSemaphore, Condition it will no longer ignore Ctrl-C. (The same was already true on Unix.)

    This differs from the behaviour of the equivalent objects in threading which will completely ignore Ctrl-C.

  • The test sub-package has been replaced by lots of unit tests in a tests sub-package. Some of the old test files have been moved over to a new examples sub-package.

  • On Windows it is now possible for a non-console python program (i.e. one using pythonw.exe instead of python.exe) to use processing.

    Previously an exception was raised when subprocess.py tried to duplicate stdin, stdout, stderr.

  • Proxy objects should now be thread safe -- they now use thread local storage.

  • Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise RuntimeError with a message saying that the object should only be shared between processes using inheritance.

    Previously, this worked unreliably on Windows but would fail with an unexplained AssertionError on Unix.

  • The names of some of the macros used for compiling the extension have changed. See INSTALL.txt and setup.py.

  • A few changes which (hopefully) make compilation possible on Solaris.

  • Lots of refactoring of the code.

  • Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux).

Changes in 0.40

  • Removed SimpleQueue and PosixQueue types. Just use Queue instead.

  • Previously if you forgot to use the

    if __name__ == '__main__':
        freezeSupport()
        ...
    

    idiom on Windows then processes could be created recursively bringing the computer to its knees. Now RuntimeError will be raised instead.

  • Some refactoring of the code.

  • A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed.

Changes in 0.39

  • One can now create one-way pipes by doing reader, writer = Pipe(duplex=False).

  • Rewrote code for managing shared memory maps.

  • Added a sharedctypes module for creating ctypes objects allocated from shared memory. On Python 2.4 this requires the installation of ctypes.

    ctypes objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a LocalManager.

  • Rearranged documentation.

  • Previously the C extension caused a segfault on 64 bit machines with Python 2.5 because it used int instead of Py_ssize_t in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report.

  • A fix for Pool.terminate().

  • A fix for cleanup behaviour of Queue.

Changes in 0.38

  • Have revamped the queue types. Now the queue types are Queue, SimpleQueue and (on systems which support it) PosixQueue.

    Now Queue should behave just like Python's normal Queue.Queue class except that qsize(), task_done() and join() are not implemented. In particular, if no maximum size was specified when the queue was created then put() will always succeed without blocking.

    A SimpleQueue instance is really just a pipe protected by a couple of locks. It has get(), put() and empty() methods but does not support timeouts or non-blocking operation.

    BufferedPipeQueue() and PipeQueue() remain as deprecated aliases of Queue() but BufferedPosixQueue() has been removed. (Not sure if we really need to keep PosixQueue()...)

  • Previously the Pool.shutdown() method was a little dodgy -- it could block indefinitely if map() or imap*() were used and did not try to terminate workers while they were doing a task.

    Now there are three new methods close(), terminate() and join() -- shutdown() is retained as a deprecated alias of terminate(). Thanks to Gerald John M. Manipon for feature request/suggested patch to shutdown().

  • Pool.imap() and Pool.imap_unordered() have gained a chunksize argument which allows the iterable to be submitted to the pool in chunks. Choosing chunksize appropriately makes Pool.imap() almost as fast as Pool.map() even for long iterables and cheap functions.

  • Previously on Windows, when the cleanup code for a LocalManager attempted to unlink the name of the file which backs the shared memory map, an exception was raised if a child process still existed which had a handle open for that mmap. This is likely to happen if a daemon process inherits a LocalManager instance.

    Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin.

  • types.MethodType is registered with copy_reg so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.)

    This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object.

  • On Windows reduction.fromfd() now returns true instances of _socket.socket, so there is no more need for the _processing.falsesocket type.

Changes in 0.37

  • Updated metadata and documentation because the project is now hosted at developer.berlios.de/projects/pyprocessing.
  • The Pool.join() method has been removed. Pool.shutdown() will now join the worker processes automatically.
  • A pool object no longer participates in a reference cycle so Pool.shutdown() should get called as soon as its reference count falls to zero.
  • On Windows if enableLogging() was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, now specifying a handler type in enableLogging() will cause any previous handlers used by the logger to be discarded.

Changes in 0.36

  • In recent versions on Unix the finalizers in a manager process were never given a chance to run before os._exit() was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.

  • The shutting down of managers has been cleaned up.

  • In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed.

  • In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed.

  • Previously the arguments to processing.Semaphore() and processing.BoundedSemaphore() did not have any defaults. The defaults should be 1 to match threading. Fixed.

  • It should now be possible for a Windows Service created by using pywin32 to spawn processes using the processing package.

    Note that pywin32 apparently has a bug meaning that Py_Finalize() is never called when the service exits so functions registered with atexit never get a chance to run. Therefore it is advisable to explicitly call sys.exitfunc() or atexit._run_exitfuncs() at the end of ServiceFramework.DoSvcRun(). Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report.

  • Added getLogger() and enableLogging() to support logging.

Changes in 0.35

  • By default processes are no longer stoppable using the stop() method: one must call setStoppable(True) before start() in order to use the stop() method. (Note that terminate() will work regardless of whether the process is marked as being "stoppable".)

    The reason for this is that on Windows getting stop() to work involves starting a new console for the child process and installing a signal handler for the SIGBREAK signal. This unfortunately means that Ctrl-Break cannot be used to kill all processes of the program.

  • Added setStoppable() and getStoppable() methods -- see above.

  • Added BufferedQueue/BufferedPipeQueue/BufferedPosixQueue. Putting an object on a buffered queue will always succeed without blocking (just like with Queue.Queue if no maximum size is specified). This makes them potentially safer than the normal queue types provided by processing which have finite capacity and may cause deadlocks if they fill.

    test/test_worker.py has been updated to use BufferedQueue for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock.

  • Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of sem_timedwait(). This means that Condition.wait() and Queue.get() should now work with timeouts on Mac OS X.

  • Added a callback argument to Pool.apply_async().

  • Added test/test_httpserverpool.py which runs a pool of http servers which share a single listening socket.

  • Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket.

  • Fixed bug in the iterator returned by Pool.imap().

  • Fixed bug in Condition.__repr__().

  • Fixed a handle/file descriptor leak when sockets or connections are unpickled.

Changes in 0.34

  • Although in version 0.33 the C extension would compile on Mac OSX, trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the ImportError exception was silently swallowed.

    This is now fixed by using the NO_SEM_TIMED macro. Unfortunately this means that some methods like Condition.wait() and Queue.get() will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing.

  • Added a terminate() method to process objects which is more forceful than stop().

  • Fixed bug in the cleanup function registered with atexit which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. Thanks to Dominique Wahli for report and testing.

  • Added test/test_workers.py which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another.

  • Added processing.Pool() which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the map() builtin. This is still experimental and undocumented --- see test/test_pool.py for example usage.

Changes in 0.33

  • Added a recvbytes_into() method for receiving byte data into objects with the writable buffer interface. Also renamed the _recv_string() and _send_string() methods of connection objects to recvbytes() and sendbytes().

  • Some optimizations for the transferring of large blocks of data using connection objects.

  • On Unix os.sysconf() is now used by default to determine whether to compile in support for posix semaphores or posix message queues.

    By using the NO_SEM_TIMED and NO_MQ_TIMED macros (see INSTALL.txt) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions sem_timedwait() or mq_timedreceive() and mq_timesend().

  • gettimeofday() is now used instead of clock_gettime() making compilation of the C extension (hopefully) possible on Mac OSX. No modification of setup.py should be necessary. Thanks to Michele Bertoldi for report and proposed patch.

  • cpuCount() function added which returns the number of CPUs in the system.

  • Bugfixes to PosixQueue class.

Changes in 0.32

  • Refactored and simplified _nonforking module -- info about sys.modules of parent process is no longer passed on to child process. Also pkgutil is no longer used.
  • Allocated space from an mmap used by LocalManager will now be recycled.
  • Better tests for LocalManager.
  • Fixed bug in managers.py concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report.
  • Added a freezeSupport() function. Calling this at the appropriate point in the main module is necessary when freezing a multiprocess program to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

Changes in 0.31

  • Fixed a one-line bug in localmanager.py which caused shared memory maps not to be resized properly.
  • Added tests for shared values/structs/arrays to test/test_processing.

Changes in 0.30

  • Process objects now support the complete API of thread objects.

    In particular isAlive(), isDaemon(), setDaemon() have been added and join() now supports the timeout parameter.

    There are also new methods stop(), getPid() and getExitCode().

  • Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores.

  • Added support for sharing simple objects between processes by using a shared memory map and the struct or array modules.

  • An activeChildren() function has been added to processing which returns a list of the child processes which are still alive.

  • A Pipe() function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent.

  • socket objects etc are now picklable and can be transferred between processes. (Requires compilation of the _processing extension.)

  • Subclasses of managers.BaseManager no longer automatically spawn a child process when an instance is created: the start() method must be called explicitly.

  • On Windows child processes are now spawned using subprocess.

  • On Windows the Python 2.5 version of pkgutil is now used for loading modules by the _nonforking module. On Python 2.4 this version of pkgutil (which uses the standard Python licence) is included in processing.compat.

  • The arguments to the functions in processing.connection have changed slightly.

  • Connection objects now have a poll() method which tests whether there is any data available for reading.

  • The test/py2exedemo folder shows how to get py2exe to create a Windows executable from a program using the processing package.

  • More tests.

  • Bugfixes.

  • Rearrangement of various stuff.

Changes in 0.21

  • By default a proxy is now only able to access those methods of its referent which have been explicitly exposed.
  • The connection sub-package now supports digest authentication.
  • Process objects are now given randomly generated 'inheritable' authentication keys.
  • A manager process will now only accept connections from processes using the same authentication key.
  • Previously get_module() from _nonforking.py was seriously messed up (though it generally worked). It is a lot saner now.
  • Python 2.4 or higher is now required.

Changes in 0.20

  • The doc folder contains HTML documentation.
  • test is now a subpackage. Running processing.test.main() will run test scripts using both processes and threads.
  • nonforking.py has been renamed _nonforking.py. manager.py has been renamed managers.py. connection.py has become a sub-package connection.
  • Listener and Client have been removed from processing, but still exist in processing.connection.
  • The package is now probably compatible with versions of Python earlier than 2.4.
  • set is no longer a type supported by the default manager type.
  • Many more changes.

Changes in 0.12

  • Fixed bug where the arguments to processing.Manager() were passed on to processing.manager.DefaultManager() in the wrong order.
  • processing.dummy is now a subpackage of processing instead of a module.
  • Rearranged package so that the test folder, README.txt and CHANGES.txt are copied when the package is installed.

Changes in 0.11

  • Fixed bug on windows when the full path of nonforking.py contains a space.
  • On unix there is no longer a need to make the arguments to the constructor of Process be picklable or for an instance of a subclass of Process to be picklable when you call the start method.
  • On unix proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to Process. (This will never be possible on windows.)
uqfoundation-multiprocess-b3457a5/py3.8/doc/COPYING.html000066400000000000000000000040211455552142400227410ustar00rootroot00000000000000

Copyright (c) 2006-2008, R Oudkerk

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

uqfoundation-multiprocess-b3457a5/py3.8/doc/INSTALL.html000066400000000000000000000063531455552142400227510ustar00rootroot00000000000000 Installation of processing

Installation of processing

Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the ctypes package (which comes automatically with Python 2.5).

Windows binary builds for Python 2.4 and Python 2.5 are available at

http://pyprocessing.berlios.de

or

http://pypi.python.org/pypi/processing

Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:

python setup.py install

It should not be necessary to do any editing of setup.py if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the macros dictionary or libraries list. The section to modify reads

else:
    macros = dict(
        HAVE_SEM_OPEN=1,
        HAVE_SEM_TIMEDWAIT=1,
        HAVE_FD_TRANSFER=1
        )
    libraries = ['rt']

More details can be found in the comments in setup.py.

Note that if you use HAVE_SEM_OPEN=0 then support for posix semaphores will not be compiled in, and then many of the functions in the processing namespace like Lock() or Queue() will not be available. However, one can still create a manager using manager = processing.Manager() and then do lock = manager.Lock() etc.
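
For instance, a minimal sketch of that fallback (illustrative only; the work done while holding the lock is a placeholder, and the sketch assumes the package was built with HAVE_SEM_OPEN=0 but a manager process can still be started):

import processing

manager = processing.Manager()    # does not rely on posix semaphore support
lock = manager.Lock()             # a proxy for a lock held in the manager process

lock.acquire()
try:
    pass                          # ... do work that needs the lock ...
finally:
    lock.release()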

Running tests

To run the test scripts using Python 2.5 do

python -m processing.tests

and on Python 2.4 do

python -c "from processing.tests import main; main()"

This will run a number of test scripts using both processes and threads.

uqfoundation-multiprocess-b3457a5/py3.8/doc/THANKS.html000066400000000000000000000017751455552142400226360ustar00rootroot00000000000000 Thanks

Thanks

Thanks to everyone who has offered bug reports, patches, suggestions:

Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.

Sorry if I have forgotten anyone.

uqfoundation-multiprocess-b3457a5/py3.8/doc/__init__.py000066400000000000000000000004001455552142400230510ustar00rootroot00000000000000import os import webbrowser def main(): ''' Show html documentation using webbrowser ''' index_html = os.path.join(os.path.dirname(__file__), 'index.html') webbrowser.open(index_html) if __name__ == '__main__': main() uqfoundation-multiprocess-b3457a5/py3.8/doc/connection-objects.html000066400000000000000000000152041455552142400254240ustar00rootroot00000000000000 Connection objects
Prev         Up         Next

Connection objects

Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.

Connection objects are usually created using processing.Pipe() -- see also Listeners and Clients.

Connection objects have the following methods:

send(obj)

Send an object to the other end of the connection which should be read using recv().

The object must be picklable.

recv()
Return an object sent from the other end of the connection using send(). Raises EOFError if there is nothing left to receive and the other end was closed.
fileno()
Returns the file descriptor or handle used by the connection.
close()

Close the connection.

This is called automatically when the connection is garbage collected.

poll(timeout=0.0)

Return whether there is any data available to be read within timeout seconds.

If timeout is None then an infinite timeout is used.

Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C.

sendBytes(buffer)

Send byte data from an object supporting the buffer interface as a complete message.

Can be used to send strings or a view returned by buffer().

recvBytes()
Return a complete message of byte data sent from the other end of the connection as a string. Raises EOFError if there is nothing left to receive and the other end was closed.
recvBytesInto(buffer, offset=0)

Read into buffer at position offset a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises EOFError if there is nothing left to receive and the other end was closed.

buffer must be an object satisfying the writable buffer interface and offset must be non-negative and less than the length of buffer (in bytes).

If the buffer is too short then a BufferTooShort exception is raised and the complete message is available as e.args[0] where e is the exception instance.

For example:

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.sendBytes('thank you')
>>> a.recvBytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.sendBytes(arr1)
>>> count = b.recvBytesInto(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
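
The poll() method described above does not appear in that example; a minimal sketch of how it behaves (expected return values shown under the semantics described earlier):

>>> from processing import Pipe
>>> a, b = Pipe()
>>> b.poll()                  # nothing has been sent yet
False
>>> a.send('ping')
>>> b.poll(1.0)               # wait up to one second for data
True
>>> b.recv()
'ping'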

Warning

The recv() method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message.

Therefore, unless the connection object was produced using Pipe() you should only use the recv() and send() methods after performing some sort of authentication. See Authentication keys.

Warning

If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie.

uqfoundation-multiprocess-b3457a5/py3.8/doc/connection-objects.txt000066400000000000000000000072761455552142400253110ustar00rootroot00000000000000.. include:: header.txt ==================== Connection objects ==================== Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects usually created using `processing.Pipe()` -- see also `Listener and Clients `_. Connection objects have the following methods: `send(obj)` Send an object to the other end of the connection which should be read using `recv()`. The object must be picklable. `recv()` Return an object sent from the other end of the connection using `send()`. Raises `EOFError` if there is nothing left to receive and the other end was closed. `fileno()` Returns the file descriptor or handle used by the connection. `close()` Close the connection. This is called automatically when the connection is garbage collected. `poll(timeout=0.0)` Return whether there is any data available to be read within `timeout` seconds. If `timeout` is `None` then an infinite timeout is used. Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C. `sendBytes(buffer)` Send byte data from an object supporting the buffer interface as a complete message. Can be used to send strings or a view returned by `buffer()`. `recvBytes()` Return a complete message of byte data sent from the other end of the connection as a string. Raises `EOFError` if there is nothing left to receive and the other end was closed. `recvBytesInto(buffer, offset=0)` Read into `buffer` at position `offset` a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises `EOFError` if there is nothing left to receive and the other end was closed. `buffer` must be an object satisfying the writable buffer interface and `offset` must be non-negative and less than the length of `buffer` (in bytes). If the buffer is too short then a `BufferTooShort` exception is raised and the complete message is available as `e.args[0]` where `e` is the exception instance. For example: >>> from processing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.sendBytes('thank you') >>> a.recvBytes() 'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.sendBytes(arr1) >>> count = b.recvBytesInto(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The `recv()` method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using `Pipe()` you should only use the `recv()` and `send()` methods after performing some sort of authentication. See `Authentication keys `_. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie. .. _Prev: queue-objects.html .. _Up: processing-ref.html .. _Next: manager-objects.html uqfoundation-multiprocess-b3457a5/py3.8/doc/connection-ref.html000066400000000000000000000357371455552142400245640ustar00rootroot00000000000000 Listeners and Clients

Listeners and Clients

Usually message passing between processes is done using queues or by using connection objects returned by Pipe().

However, the processing.connection module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for digest authentication using the hmac module from the standard library.

Classes and functions

The module defines the following functions:

Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)
Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections.
Client(address, family=None, authenticate=False, authkey=None)

Attempts to set up a connection to the listener which is using address address, returning a connection object.

The type of the connection is determined by the family argument, but this can generally be omitted since it can usually be inferred from the format of address.

If authenticate is true or authkey is a string then digest authentication is used. The key used for authentication will be either authkey or currentProcess().getAuthKey() if authkey is None. If authentication fails then AuthenticationError is raised. See Authentication keys.

The module exports two exception types:

exception AuthenticationError
Exception raised when there is an authentication error.
exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.
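
For illustration, here is a minimal sketch (not part of the original examples) of handling BufferTooShort when the supplied buffer is too small; it assumes the Pipe() and recvBytesInto() behaviour described above:

>>> from processing import Pipe
>>> from processing.connection import BufferTooShort
>>> import array
>>> a, b = Pipe()
>>> a.sendBytes('a message that will not fit')
>>> small = array.array('c', ' ' * 8)          # only 8 bytes of writable buffer
>>> try:
...     b.recvBytesInto(small)
... except BufferTooShort, e:
...     print 'buffer too short; full message was:', e.args[0]
...
buffer too short; full message was: a message that will not fit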

Listener objects

Instances of Listener have the following methods:

__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)
address
The address to be used by the bound socket or named pipe of the listener object.
family

The type of the socket (or named pipe) to use.

This can be one of the strings 'AF_INET' (for a TCP socket), 'AF_UNIX' (for a Unix domain socket) or 'AF_PIPE' (for a Windows named pipe). Of these only the first is guaranteed to be available.

If family is None then the family is inferred from the format of address. If address is also None then a default is chosen. This default is the family which is assumed to be the fastest available. See Address formats.

Note that if family is 'AF_UNIX' then the associated file will only be readable/writable by the user running the current process -- use os.chmod() if you need to let other users access the socket.

backlog
If the listener object uses a socket then backlog is passed to the listen() method of the socket once it has been bound.
authenticate
If authenticate is true or authkey is not None then digest authentication is used.
authkey

If authkey is a string then it will be used as the authentication key; otherwise it must be None.

If authkey is None and authenticate is true then currentProcess().getAuthKey() is used as the authentication key.

If authkey is None and authenticate is false then no authentication is done.

If authentication fails then AuthenticationError is raised. See Authentication keys.

accept()

Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then AuthenticationError is raised.

Returns a connection object.

close()

Close the bound socket or named pipe of the listener object.

This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.

Listener objects have the following read-only properties:

address
The address which is being used by the listener object.
last_accepted

The address from which the last accepted connection came.

If this is unavailable then None is returned.

Address formats

  • An 'AF_INET' address is a tuple of the form (hostname, port) where hostname is a string and port is an integer

  • An 'AF_UNIX' address is a string representing a filename on the filesystem.

  • An 'AF_PIPE' address is a string of the form r'\\.\pipe\PipeName'.

    To use Client to connect to a named pipe on a remote computer called ServerName one should use an address of the form r'\\ServerName\pipe\PipeName' instead.

Note that any string beginning with two backslashes is assumed by default to be an 'AF_PIPE' address rather than an 'AF_UNIX' address.
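
As a rough sketch (the particular addresses below are only placeholders), a listener for each address family might be created as follows:

from processing.connection import Listener

inet_listener = Listener(('localhost', 6000))                  # 'AF_INET': a (hostname, port) tuple
unix_listener = Listener('/tmp/example-socket', 'AF_UNIX')     # 'AF_UNIX': a filename (Unix only)
# pipe_listener = Listener(r'\\.\pipe\ExamplePipe', 'AF_PIPE') # 'AF_PIPE': a named pipe (Windows only)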

Authentication keys

When one uses the recv() method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore Listener and Client use the hmac module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does not involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the return value of currentProcess().getAuthKey() is used (see Process objects). This value will automatically be inherited by any Process object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves.

Suitable authentication keys can also be generated by using os.urandom().
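
For example, a key produced by os.urandom() can be passed explicitly to both ends of a connection. This is only a sketch -- how the key is distributed to the connecting process (a shared file, a command line argument, etc.) is left to the application:

import os
from processing.connection import Listener, Client

key = os.urandom(16)            # 16 random bytes used as the authentication key

listener = Listener(('localhost', 6000), authkey=key)
# ... and in the process which connects:
# conn = Client(('localhost', 6000), authkey=key)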

Example

The following server code creates a listener which uses 'secret password' as an authentication key. It then waits for a connection and sends some data to the client:

from processing.connection import Listener
from array import array

address = ('localhost', 6000)     # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')

conn = listener.accept()
print 'connection accepted from', listener.last_accepted

conn.send([2.25, None, 'junk', float])

conn.sendBytes('hello')

conn.sendBytes(array('i', [42, 1729]))

conn.close()
listener.close()

The following code connects to the server and receives some data from the server:

from processing.connection import Client
from array import array

address = ('localhost', 6000)
conn = Client(address, authkey='secret password')

print conn.recv()                 # => [2.25, None, 'junk', float]

print conn.recvBytes()            # => 'hello'

arr = array('i', [0, 0, 0, 0, 0])
print conn.recvBytesInto(arr)    # => 8
print arr                         # => array('i', [42, 1729, 0, 0, 0])

conn.close()
uqfoundation-multiprocess-b3457a5/py3.8/doc/connection-ref.txt000066400000000000000000000210001455552142400244110ustar00rootroot00000000000000.. include:: header.txt ======================= Listeners and Clients ======================= Usually message passing between processes is done using queues or by using connection objects returned by `Pipe()`. However, the `processing.connection` module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for *digest authentication* using the `hmac` module from the standard library. Classes and functions ===================== The module defines the following functions: `Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)` Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections. `Client(address, family=None, authenticate=False, authkey=None)` Attempts to set up a connection to the listener which is using address `address`, returning a `connection object `_. The type of the connection is determined by `family` argument, but this can generally be omitted since it can usually be inferred from the format of `address`. If `authentication` or `authkey` is a string then digest authentication is used. The key used for authentication will be either `authkey` or `currentProcess.getAuthKey()` if `authkey` is `None`. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. .. `deliverChallenge(connection, authkey)` Sends a randomly generated message to the other end of the connection and waits for a reply. If the reply matches the digest of the message using `authkey` as the key then a welcome message is sent to the other end of the connection. Otherwise `AuthenticationError` is raised. `answerChallenge(connection, authkey)` Receives a message, calculates the digest of the message using `authkey` as the key, and then sends the digest back. If a welcome message is not received then `AuthenticationError` is raised. The module exports two exception types: **exception** `AuthenticationError` Exception raised when there is an authentication error. **exception** `BufferTooShort` Exception raise by the `recvBytesInto()` method of a connection object when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Listener objects ================ Instances of `Listener` have the following methods: `__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)` `address` The address to be used by the bound socket or named pipe of the listener object. `family` The type of the socket (or named pipe) to use. This can be one of the strings `'AF_INET'` (for a TCP socket), `'AF_UNIX'` (for a Unix domain socket) or `'AF_PIPE'` (for a Windows named pipe). Of these only the first is guaranteed to be available. If `family` is `None` than the family is inferred from the format of `address`. If `address` is also `None` then a default is chosen. This default is the family which is assumed to be the fastest available. See `Address formats`_. Note that if `family` is `'AF_UNIX'` then the associated file will have only be readable/writable by the user running the current process -- use `os.chmod()` is you need to let other users access the socket. `backlog` If the listener object uses a socket then `backlog` is passed to the `listen()` method of the socket once it has been bound. 
`authenticate` If `authenticate` is true or `authkey` is not `None` then digest authentication is used. `authkey` If `authkey` is a string then it will be used as the authentication key; otherwise it must be `None`. If `authkey` is `None` and `authenticate` is true then `currentProcess.getAuthKey()` is used as the authentication key. If `authkey` is `None` and `authentication` is false then no authentication is done. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. `accept()` Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then `AuthenticationError` is raised. Returns a `connection object ` object. `close()` Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: `address` The address which is being used by the listener object. `last_accepted` The address from which the last accepted connection came. If this is unavailable then `None` is returned. Address formats =============== * An `'AF_INET'` address is a tuple of the form `(hostname, port)` where `hostname` is a string and `port` is an integer * An `'AF_UNIX'` address is a string representing a filename on the filesystem. * An `'AF_PIPE'` address is a string of the form `r'\\\\.\\pipe\\PipeName'`. To use `Client` to connect to a named pipe on a remote computer called `ServerName` one should use an address of the form `r'\\\\ServerName\\pipe\\PipeName'` instead. Note that any string beginning with two backslashes is assumed by default to be an `'AF_PIPE'` address rather than an `'AF_UNIX'` address. Authentication keys =================== When one uses the `recv()` method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore `Listener` and `Client` use the `hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does *not* involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of `currentProcess().getAuthKey()` is used (see `Process objects `_). This value will automatically inherited by any `Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between the themselves. Suitable authentication keys can also be generated by using `os.urandom()`. Example ======= The following server code creates a listener which uses `'secret password'` as an authentication key. 
It then waits for a connection and sends some data to the client:: from processing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey='secret password') conn = listener.accept() print 'connection accepted from', listener.last_accepted conn.send([2.25, None, 'junk', float]) conn.sendBytes('hello') conn.sendBytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from processing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey='secret password') print conn.recv() # => [2.25, None, 'junk', float] print conn.recvBytes() # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print conn.recvBytesInto(arr) # => 8 print arr # => array('i', [42, 1729, 0, 0, 0]) conn.close() .. _Prev: sharedctypes.html .. _Up: processing-ref.html .. _Next: programming-guidelines.html uqfoundation-multiprocess-b3457a5/py3.8/doc/header.txt000066400000000000000000000003401455552142400227340ustar00rootroot00000000000000.. default-role:: literal .. header:: Prev_ |spaces| Up_ |spaces| Next_ .. footer:: Prev_ |spaces| Up_ |spaces| Next_ .. |nbsp| unicode:: U+000A0 .. |spaces| replace:: |nbsp| |nbsp| |nbsp| |nbsp| uqfoundation-multiprocess-b3457a5/py3.8/doc/html4css1.css000066400000000000000000000126361455552142400233120ustar00rootroot00000000000000/* :Author: David Goodger :Contact: goodger@users.sourceforge.net :Date: $Date: 2008/01/29 22:14:02 $ :Revision: $Revision: 1.1.1.1 $ :Copyright: This stylesheet has been placed in the public domain. Default cascading style sheet for the HTML output of Docutils. See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to customize this style sheet. */ /* used to remove borders from tables and images */ .borderless, table.borderless td, table.borderless th { border: 0 } table.borderless td, table.borderless th { /* Override padding for "table.docutils td" with "! important". The right padding separates the table cells. */ padding: 0 0.5em 0 0 ! important } .first { /* Override more specific margin styles with "! important". */ margin-top: 0 ! important } .last, .with-subtitle { margin-bottom: 0 ! important } .hidden { display: none } a.toc-backref { text-decoration: none ; color: black } blockquote.epigraph { margin: 2em 5em ; } dl.docutils dd { margin-bottom: 0.5em } /* Uncomment (and remove this text!) to get bold-faced definition list terms dl.docutils dt { font-weight: bold } */ div.abstract { margin: 2em 5em } div.abstract p.topic-title { font-weight: bold ; text-align: center } div.admonition, div.attention, div.caution, div.danger, div.error, div.hint, div.important, div.note, div.tip, div.warning { margin: 2em ; border: medium outset ; padding: 1em } div.admonition p.admonition-title, div.hint p.admonition-title, div.important p.admonition-title, div.note p.admonition-title, div.tip p.admonition-title { font-weight: bold ; font-family: sans-serif } div.attention p.admonition-title, div.caution p.admonition-title, div.danger p.admonition-title, div.error p.admonition-title, div.warning p.admonition-title { color: red ; font-weight: bold ; font-family: sans-serif } /* Uncomment (and remove this text!) to get reduced vertical space in compound paragraphs. 
div.compound .compound-first, div.compound .compound-middle { margin-bottom: 0.5em } div.compound .compound-last, div.compound .compound-middle { margin-top: 0.5em } */ div.dedication { margin: 2em 5em ; text-align: center ; font-style: italic } div.dedication p.topic-title { font-weight: bold ; font-style: normal } div.figure { margin-left: 2em ; margin-right: 2em } div.footer, div.header { clear: both; font-size: smaller } div.line-block { display: block ; margin-top: 1em ; margin-bottom: 1em } div.line-block div.line-block { margin-top: 0 ; margin-bottom: 0 ; margin-left: 1.5em } div.sidebar { margin-left: 1em ; border: medium outset ; padding: 1em ; background-color: #ffffee ; width: 40% ; float: right ; clear: right } div.sidebar p.rubric { font-family: sans-serif ; font-size: medium } div.system-messages { margin: 5em } div.system-messages h1 { color: red } div.system-message { border: medium outset ; padding: 1em } div.system-message p.system-message-title { color: red ; font-weight: bold } div.topic { margin: 2em } h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { margin-top: 0.4em } h1.title { text-align: center } h2.subtitle { text-align: center } hr.docutils { width: 75% } img.align-left { clear: left } img.align-right { clear: right } ol.simple, ul.simple { margin-bottom: 1em } ol.arabic { list-style: decimal } ol.loweralpha { list-style: lower-alpha } ol.upperalpha { list-style: upper-alpha } ol.lowerroman { list-style: lower-roman } ol.upperroman { list-style: upper-roman } p.attribution { text-align: right ; margin-left: 50% } p.caption { font-style: italic } p.credits { font-style: italic ; font-size: smaller } p.label { white-space: nowrap } p.rubric { font-weight: bold ; font-size: larger ; color: maroon ; text-align: center } p.sidebar-title { font-family: sans-serif ; font-weight: bold ; font-size: larger } p.sidebar-subtitle { font-family: sans-serif ; font-weight: bold } p.topic-title { font-weight: bold } pre.address { margin-bottom: 0 ; margin-top: 0 ; font-family: serif ; font-size: 100% } pre.literal-block, pre.doctest-block { margin-left: 2em ; margin-right: 2em ; background-color: #eeeeee } span.classifier { font-family: sans-serif ; font-style: oblique } span.classifier-delimiter { font-family: sans-serif ; font-weight: bold } span.interpreted { font-family: sans-serif } span.option { white-space: nowrap } span.pre { white-space: pre } span.problematic { color: red } span.section-subtitle { /* font-size relative to parent (h1..h6 element) */ font-size: 80% } table.citation { border-left: solid 1px gray; margin-left: 1px } table.docinfo { margin: 2em 4em } table.docutils { margin-top: 0.5em ; margin-bottom: 0.5em } table.footnote { border-left: solid 1px black; margin-left: 1px } table.docutils td, table.docutils th, table.docinfo td, table.docinfo th { padding-left: 0.5em ; padding-right: 0.5em ; vertical-align: top } table.docutils th.field-name, table.docinfo th.docinfo-name { font-weight: bold ; text-align: left ; white-space: nowrap ; padding-left: 0 } h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { font-size: 100% } /* tt.docutils { background-color: #eeeeee } */ ul.auto-toc { list-style-type: none } uqfoundation-multiprocess-b3457a5/py3.8/doc/index.html000066400000000000000000000064761455552142400227600ustar00rootroot00000000000000 Documentation for processing-0.52
uqfoundation-multiprocess-b3457a5/py3.8/doc/index.txt000066400000000000000000000021751455552142400226230ustar00rootroot00000000000000.. include:: header.txt .. include:: version.txt ======================================== Documentation for processing-|version| ======================================== :Author: R Oudkerk :Contact: roudkerk at users.berlios.de :Url: http://developer.berlios.de/projects/pyprocessing :Licence: BSD Licence Contents ======== * `Introduction `_ * `Package reference `_ + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes objects `_ + `Listeners and Clients `_ * `Programming guidelines `_ * `Tests and examples `_ See also ======== * `Installation instructions `_ * `Changelog `_ * `Acknowledgments `_ * `Licence `_ .. _Next: intro.html .. _Up: index.html .. _Prev: index.html uqfoundation-multiprocess-b3457a5/py3.8/doc/intro.html000066400000000000000000000427461455552142400230040ustar00rootroot00000000000000 Introduction

Introduction

Threads, processes and the GIL

To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads.

Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient.

On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other.

CPython has a Global Interpreter Lock (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C.

One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead.

Python gives little direct support for writing programs using multiple processes. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs.

Forking and spawning

There are two ways of creating a new process in Python:

  • The current process can fork a new child process by using the os.fork() function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits copies of all variables that the parent process had.

    However, os.fork() is not available on every platform: in particular Windows does not support it.

  • Alternatively, the current process can spawn a completely new Python interpreter by using the subprocess module or one of the os.spawn*() functions.

    Getting this new interpreter into a fit state to perform the task set for it by its parent process is, however, a bit of a challenge.

The processing package uses os.fork() if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process.

The Process class

In the processing package processes are spawned by creating a Process object and then calling its start() method. processing.Process follows the API of threading.Thread. A trivial example of a multiprocess program is

from processing import Process

def f(name):
    print 'hello', name

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

Here the function f is run in a child process.

For an explanation of why (on Windows) the if __name__ == '__main__' part is necessary see Programming guidelines.

Exchanging objects between processes

processing supports two types of communication channel between processes:

Queues:

The function Queue() returns a near clone of Queue.Queue -- see the Python standard documentation. For example

from processing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()

Queues are thread and process safe. See Queues.

Pipes:

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example

from processing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print parent_conn.recv()   # prints "[42, None, 'hello']"
    p.join()

The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See Pipes.

Synchronization between processes

processing contains equivalents of all the synchronization primitives from threading. For instance one can use a lock to ensure that only one process prints to standard output at a time:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()

    for num in range(10):
        Process(target=f, args=(lock, num)).start()

Without using the lock output from the different processes is liable to get all mixed up.

Sharing state between processes

As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes.

However, if you really do need to use some shared data then processing provides a couple of ways of doing so.

Shared memory:

Data can be stored in a shared memory map using Value or Array. For example the following code

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

will print

3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

The 'd' and 'i' arguments used when creating num and arr are typecodes of the kind used by the array module: 'd' indicates a double precision float and 'i' indicates a signed integer. These shared objects will be process and thread safe.

For more flexibility in using shared memory one can use the processing.sharedctypes module which supports the creation of arbitrary ctypes objects allocated from shared memory.
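
As a minimal sketch (assuming the RawValue() and RawArray() constructors documented in the shared ctypes reference), unsynchronized shared ctypes objects can be used like this:

from processing import Process
from processing.sharedctypes import RawValue, RawArray

def f(n, a):
    n.value = 2.5
    for i in range(len(a)):
        a[i] = a[i] * 2

if __name__ == '__main__':
    n = RawValue('d', 0.0)          # an unsynchronized double
    a = RawArray('i', range(5))     # an unsynchronized array of ints

    p = Process(target=f, args=(n, a))
    p.start()
    p.join()

    print n.value                   # => 2.5
    print a[:]                      # => [0, 2, 4, 6, 8]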

Server process:

A manager object returned by Manager() controls a server process which holds python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue, Value and Array. For example:

from processing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    print d
    print l

will print

{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Creating managers which support other types is not hard --- see Customized managers.

Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See Server process managers.

Using a pool of workers

The Pool() function returns an object representing a pool of worker processes. It has methods which allow tasks to be offloaded to the worker processes in a few different ways.

For example:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes
    result = pool.applyAsync(f, [10])     # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow
    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

See Process pools.

Speed

The following benchmarks were performed on a single-core Pentium 4, 2.5GHz laptop running Windows XP and Ubuntu Linux 6.10 --- see benchmarks.py.

Number of 256 byte string objects passed between processes/threads per sec:

Connection type Windows Linux
Queue.Queue 49,000 17,000-50,000 [1]
processing.Queue 22,000 21,000
Queue managed by server 6,900 6,500
processing.Pipe 52,000 57,000
[1] For some reason the performance of Queue.Queue is very variable on Linux.

Number of acquires/releases of a lock per sec:

Lock type Windows Linux
threading.Lock 850,000 560,000
processing.Lock 420,000 510,000
Lock managed by server 10,000 8,400
threading.RLock 93,000 76,000
processing.RLock 420,000 500,000
RLock managed by server 8,800 7,400

Number of interleaved waits/notifies per sec on a condition variable by two processes:

Condition type Windows Linux
threading.Condition 27,000 31,000
processing.Condition 26,000 25,000
Condition managed by server 6,600 6,000

Number of integers retrieved from a sequence per sec:

Sequence type Windows Linux
list 6,400,000 5,100,000
unsynchronized shared array 3,900,000 3,100,000
synchronized shared array 200,000 220,000
list managed by server 20,000 17,000
uqfoundation-multiprocess-b3457a5/py3.8/doc/intro.txt000066400000000000000000000301551455552142400226460ustar00rootroot00000000000000.. include:: header.txt ============== Introduction ============== Threads, processes and the GIL ============================== To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads. Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient. On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other. CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C. One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead. Python gives little direct support for writing programs using multiple process. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs. Forking and spawning ==================== There are two ways of creating a new process in Python: * The current process can *fork* a new child process by using the `os.fork()` function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits *copies* of all variables that the parent process had. However, `os.fork()` is not available on every platform: in particular Windows does not support it. * Alternatively, the current process can spawn a completely new Python interpreter by using the `subprocess` module or one of the `os.spawn*()` functions. Getting this new interpreter in to a fit state to perform the task set for it by its parent process is, however, a bit of a challenge. The `processing` package uses `os.fork()` if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process. The Process class ================= In the `processing` package processes are spawned by creating a `Process` object and then calling its `start()` method. `processing.Process` follows the API of `threading.Thread`. A trivial example of a multiprocess program is :: from processing import Process def f(name): print 'hello', name if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() Here the function `f` is run in a child process. For an explanation of why (on Windows) the `if __name__ == '__main__'` part is necessary see `Programming guidelines `_. 
Exchanging objects between processes ==================================== `processing` supports two types of communication channel between processes: **Queues**: The function `Queue()` returns a near clone of `Queue.Queue` -- see the Python standard documentation. For example :: from processing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print q.get() # prints "[42, None, 'hello']" p.join() Queues are thread and process safe. See `Queues `_. **Pipes**: The `Pipe()` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example :: from processing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print parent_conn.recv() # prints "[42, None, 'hello']" p.join() The two connection objects returned by `Pipe()` represent the two ends of the pipe. Each connection object has `send()` and `recv()` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See `Pipes `_. Synchronization between processes ================================= `processing` contains equivalents of all the synchronization primitives from `threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from processing import Process, Lock def f(l, i): l.acquire() print 'hello world', i l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes =============================== As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then `processing` provides a couple of ways of doing so. **Shared memory**: Data can be stored in a shared memory map using `Value` or `Array`. For example the following code :: from processing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print num.value print arr[:] will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The `'d'` and `'i'` arguments used when creating `num` and `arr` are typecodes of the kind used by the `array` module: `'d'` indicates a double precision float and `'i'` inidicates a signed integer. These shared objects will be process and thread safe. For more flexibility in using shared memory one can use the `processing.sharedctypes` module which supports the creation of arbitrary `ctypes objects allocated from shared memory `_. **Server process**: A manager object returned by `Manager()` controls a server process which holds python objects and allows other processes to manipulate them using proxies. A manager returned by `Manager()` will support types `list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value` and `Array`. 
For example:: from processing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print d print l will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Creating managers which support other types is not hard --- see `Customized managers `_. Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See `Server process managers `_. Using a pool of workers ======================= The `Pool()` function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" See `Process pools `_. Speed ===== The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see `benchmarks.py <../examples/benchmarks.py>`_. *Number of 256 byte string objects passed between processes/threads per sec*: ================================== ========== ================== Connection type Windows Linux ================================== ========== ================== Queue.Queue 49,000 17,000-50,000 [1]_ processing.Queue 22,000 21,000 Queue managed by server 6,900 6,500 processing.Pipe 52,000 57,000 ================================== ========== ================== .. [1] For some reason the performance of `Queue.Queue` is very variable on Linux. *Number of acquires/releases of a lock per sec*: ============================== ========== ========== Lock type Windows Linux ============================== ========== ========== threading.Lock 850,000 560,000 processing.Lock 420,000 510,000 Lock managed by server 10,000 8,400 threading.RLock 93,000 76,000 processing.RLock 420,000 500,000 RLock managed by server 8,800 7,400 ============================== ========== ========== *Number of interleaved waits/notifies per sec on a condition variable by two processes*: ============================== ========== ========== Condition type Windows Linux ============================== ========== ========== threading.Condition 27,000 31,000 processing.Condition 26,000 25,000 Condition managed by server 6,600 6,000 ============================== ========== ========== *Number of integers retrieved from a sequence per sec*: ============================== ========== ========== Sequence type Windows Linux ============================== ========== ========== list 6,400,000 5,100,000 unsynchornized shared array 3,900,000 3,100,000 synchronized shared array 200,000 220,000 list managed by server 20,000 17,000 ============================== ========== ========== .. _Prev: index.html .. _Up: index.html .. _Next: processing-ref.html uqfoundation-multiprocess-b3457a5/py3.8/doc/manager-objects.html000066400000000000000000000440461455552142400247050ustar00rootroot00000000000000 Manager objects

Manager objects

A manager object controls a server process which manages shared objects. Other processes can access the shared objects by using proxies.

Manager processes will be shut down as soon as they are garbage collected or their parent process exits. The manager classes are defined in the processing.managers module.

BaseManager

BaseManager is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects.

The public methods of BaseManager are the following:

__init__(self, address=None, authkey=None)

Creates a manager object.

Once created one should call start() or serveForever() to ensure that the manager object refers to a started manager process.

The arguments to the constructor are as follows:

address

The address on which the manager process listens for new connections. If address is None then an arbitrary one is chosen.

See Listener objects.

authkey

The authentication key which will be used to check the validity of incoming connections to the server process.

If authkey is None then currentProcess().getAuthKey() is used. Otherwise authkey is used and it must be a string.

See Authentication keys.

start()
Spawn or fork a subprocess to start the manager.
serveForever()
Start the manager in the current process. See Using a remote manager.
fromAddress(address, authkey)
A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See Using a remote manager.
shutdown()

Stop the process used by the manager. This is only available if start() has been used to start the server process.

This can be called multiple times.

BaseManager instances also have one read-only property:

address
The address used by the manager.

The creation of managers which support arbitrary types is discussed below in Customized managers.

SyncManager

SyncManager is a subclass of BaseManager which can be used for the synchronization of processes. Objects of this type are returned by processing.Manager().

It also supports creation of shared lists and dictionaries. The instance methods defined by SyncManager are

BoundedSemaphore(value=1)
Creates a shared threading.BoundedSemaphore object and returns a proxy for it.
Condition(lock=None)

Creates a shared threading.Condition object and returns a proxy for it.

If lock is supplied then it should be a proxy for a threading.Lock or threading.RLock object.

Event()
Creates a shared threading.Event object and returns a proxy for it.
Lock()
Creates a shared threading.Lock object and returns a proxy for it.
Namespace()

Creates a shared Namespace object and returns a proxy for it.

See Namespace objects.

Queue(maxsize=0)
Creates a shared Queue.Queue object and returns a proxy for it.
RLock()
Creates a shared threading.RLock object and returns a proxy for it.
Semaphore(value=1)
Creates a shared threading.Semaphore object and returns a proxy for it.
Array(typecode, sequence)
Creates an array and returns a proxy for it. (format is ignored.)
Value(typecode, value)
Creates an object with a writable value attribute and returns a proxy for it.
dict(), dict(mapping), dict(sequence)
Creates a shared dict object and returns a proxy for it.
list(), list(sequence)
Creates a shared list object and returns a proxy for it.

Namespace objects

A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes.

However, when using a proxy for a namespace object, an attribute beginning with '_' will be an attribute of the proxy and not an attribute of the referent:

>>> manager = processing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3    # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')

Customized managers

To create one's own manager one creates a subclass of BaseManager.

To create a method of the subclass which will create new shared objects one uses the following function:

CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)

Returns a function with signature func(self, *args, **kwds) which will create a shared object using the manager self and return a proxy for it.

The shared objects will be created by evaluating callable(*args, **kwds) in the manager process.

The arguments are:

callable
The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored.
proxytype

The type of proxy which will be used for object returned by callable.

If proxytype is None then each time an object is returned by callable either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the exposed argument, see below.

exposed

Given a shared object returned by callable, the exposed argument is the list of those method names which should be exposed via BaseProxy._callMethod(). [1] [2]

If exposed is None and callable.__exposed__ exists then callable.__exposed__ is used instead.

If exposed is None and callable.__exposed__ does not exist then all methods of the shared object which do not start with '_' will be exposed.

An attempt to use BaseProxy._callMethod() with a method name which is not exposed will raise an exception.

typeid
If typeid is a string then it is used as an identifier for the callable. Otherwise, typeid must be None and a string prefixed by callable.__name__ is used as the identifier.
[1] A method here means any attribute which has a __call__ attribute.
[2]

The method names __repr__, __str__, and __cmp__ of a shared object are always exposed by the manager. However, instead of invoking the __repr__(), __str__(), __cmp__() instance methods (none of which are guaranteed to exist) they invoke the builtin functions repr(), str() and cmp().

Note that one should generally avoid exposing rich comparison methods like __eq__(), __ne__(), __le__(). To make the proxy type support comparison by value one can just expose __cmp__() instead (even if the referent does not have such a method).

Example

from processing.managers import BaseManager, CreatorMethod

class FooClass(object):
    def bar(self):
        print 'BAR'
    def baz(self):
        print 'BAZ'

class NewManager(BaseManager):
    Foo = CreatorMethod(FooClass)

if __name__ == '__main__':
    manager = NewManager()
    manager.start()
    foo = manager.Foo()
    foo.bar()               # prints 'BAR'
    foo.baz()               # prints 'BAZ'
    manager.shutdown()

See ex_newtype.py for more examples.

Using a remote manager

It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it).

Running the following commands creates a server for a shared queue which remote clients can use:

>>> from processing.managers import BaseManager, CreatorMethod
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy')
...
>>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none')
>>> m.serveForever()

One client can access the server as follows:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.put('hello')

Another client can also use it:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.get()
'hello'
uqfoundation-multiprocess-b3457a5/py3.8/doc/manager-objects.txt000066400000000000000000000235161455552142400245570ustar00rootroot00000000000000.. include:: header.txt ================= Manager objects ================= A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the `processing.managers` module. BaseManager =========== `BaseManager` is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects. The public methods of `BaseManager` are the following: `__init__(self, address=None, authkey=None)` Creates a manager object. Once created one should call `start()` or `serveForever()` to ensure that the manager object refers to a started manager process. The arguments to the constructor are as follows: `address` The address on which the manager process listens for new connections. If `address` is `None` then an arbitrary one is chosen. See `Listener objects `_. `authkey` The authentication key which will be used to check the validity of incoming connections to the server process. If `authkey` is `None` then `currentProcess().getAuthKey()`. Otherwise `authkey` is used and it must be a string. See `Authentication keys `_. `start()` Spawn or fork a subprocess to start the manager. `serveForever()` Start the manager in the current process. See `Using a remote manager`_. `fromAddress(address, authkey)` A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See `Using a remote manager`_. `shutdown()` Stop the process used by the manager. This is only available if `start()` has been used to start the server process. This can be called multiple times. `BaseManager` instances also have one read-only property: `address` The address used by the manager. The creation of managers which support arbitrary types is discussed below in `Customized managers`_. SyncManager =========== `SyncManager` is a subclass of `BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by `processing.Manager()`. It also supports creation of shared lists and dictionaries. The instance methods defined by `SyncManager` are `BoundedSemaphore(value=1)` Creates a shared `threading.BoundedSemaphore` object and returns a proxy for it. `Condition(lock=None)` Creates a shared `threading.Condition` object and returns a proxy for it. If `lock` is supplied then it should be a proxy for a `threading.Lock` or `threading.RLock` object. `Event()` Creates a shared `threading.Event` object and returns a proxy for it. `Lock()` Creates a shared `threading.Lock` object and returns a proxy for it. `Namespace()` Creates a shared `Namespace` object and returns a proxy for it. See `Namespace objects`_. `Queue(maxsize=0)` Creates a shared `Queue.Queue` object and returns a proxy for it. `RLock()` Creates a shared `threading.RLock` object and returns a proxy for it. `Semaphore(value=1)` Creates a shared `threading.Semaphore` object and returns a proxy for it. `Array(typecode, sequence)` Create an array and returns a proxy for it. (`format` is ignored.) `Value(typecode, value)` Create an object with a writable `value` attribute and returns a proxy for it. 
`dict()`, `dict(mapping)`, `dict(sequence)` Creates a shared `dict` object and returns a proxy for it. `list()`, `list(sequence)` Creates a shared `list` object and returns a proxy for it. Namespace objects ----------------- A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with `'_'` will be an attribute of the proxy and not an attribute of the referent:: >>> manager = processing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print Global Namespace(x=10, y='hello') Customized managers =================== To create one's own manager one creates a subclass of `BaseManager`. To create a method of the subclass which will create new shared objects one uses the following function: `CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)` Returns a function with signature `func(self, *args, **kwds)` which will create a shared object using the manager `self` and return a proxy for it. The shared objects will be created by evaluating `callable(*args, **kwds)` in the manager process. The arguments are: `callable` The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored. `proxytype` The type of proxy which will be used for object returned by `callable`. If `proxytype` is `None` then each time an object is returned by `callable` either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the `exposed` argument, see below. `exposed` Given a shared object returned by `callable`, the `exposed` argument is the list of those method names which should be exposed via |callmethod|_. [#]_ [#]_ If `exposed` is `None` and `callable.__exposed__` exists then `callable.__exposed__` is used instead. If `exposed` is `None` and `callable.__exposed__` does not exist then all methods of the shared object which do not start with `'_'` will be exposed. An attempt to use |callmethod| with a method name which is not exposed will raise an exception. `typeid` If `typeid` is a string then it is used as an identifier for the callable. Otherwise, `typeid` must be `None` and a string prefixed by `callable.__name__` is used as the identifier. .. |callmethod| replace:: ``BaseProxy._callMethod()`` .. _callmethod: proxy-objects.html#methods-of-baseproxy .. [#] A method here means any attribute which has a `__call__` attribute. .. [#] The method names `__repr__`, `__str__`, and `__cmp__` of a shared object are always exposed by the manager. However, instead of invoking the `__repr__()`, `__str__()`, `__cmp__()` instance methods (none of which are guaranteed to exist) they invoke the builtin functions `repr()`, `str()` and `cmp()`. Note that one should generally avoid exposing rich comparison methods like `__eq__()`, `__ne__()`, `__le__()`. To make the proxy type support comparison by value one can just expose `__cmp__()` instead (even if the referent does not have such a method). 
Example ------- :: from processing.managers import BaseManager, CreatorMethod class FooClass(object): def bar(self): print 'BAR' def baz(self): print 'BAZ' class NewManager(BaseManager): Foo = CreatorMethod(FooClass) if __name__ == '__main__': manager = NewManager() manager.start() foo = manager.Foo() foo.bar() # prints 'BAR' foo.baz() # prints 'BAZ' manager.shutdown() See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples. Using a remote manager ====================== It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a shared queue which remote clients can use:: >>> from processing.managers import BaseManager, CreatorMethod >>> import Queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy') ... >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none') >>> m.serveForever() One client can access the server as follows:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.put('hello') Another client can also use it:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.get() 'hello' .. _Prev: connection-objects.html .. _Up: processing-ref.html .. _Next: proxy-objects.html uqfoundation-multiprocess-b3457a5/py3.8/doc/pool-objects.html000066400000000000000000000265511455552142400242450ustar00rootroot00000000000000 Process Pools
Prev         Up         Next

Process Pools

The processing.pool module has one public class:

class Pool(processes=None, initializer=None, initargs=())

A class representing a pool of worker processes.

Tasks can be offloaded to the pool and the results dealt with when they become available.

Note that tasks can only be submitted (or retrieved) by the process which created the pool object.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

Pool objects

Pool has the following public methods:

__init__(processes=None)
The constructor creates and starts processes worker processes. If processes is None then cpuCount() is used to find a default, or 1 if cpuCount() raises NotImplementedError.
apply(func, args=(), kwds={})
Equivalent of the apply() builtin function. It blocks till the result is ready.
applyAsync(func, args=(), kwds={}, callback=None)

A variant of the apply() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

map(func, iterable, chunksize=None)

A parallel equivalent of the map() builtin function. It blocks till the result is ready.

This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.

mapAsync(func, iterable, chunksize=None, callback=None)

A variant of the map() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

imap(func, iterable, chunksize=1)

An equivalent of itertools.imap().

The chunksize argument is the same as the one used by the map() method. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1.

Also if chunksize is 1 then the next() method of the iterator returned by the imap() method has an optional timeout parameter: next(timeout) will raise processing.TimeoutError if the result cannot be returned within timeout seconds.

imapUnordered(func, iterable, chunksize=1)
The same as imap() except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".)
close()
Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
terminate()
Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected terminate() will be called immediately.
join()
Wait for the worker processes to exit. One must call close() or terminate() before using join().

Asynchronous result objects

The result objects returned by applyAsync() and mapAsync() have the following public methods:

get(timeout=None)
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then processing.TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
wait(timeout=None)
Waits until the result is available or until timeout seconds pass.
ready()
Returns whether the call has completed.
successful()
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.

Examples

The following example demonstrates the use of a pool:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes

    result = pool.applyAsync(f, (10,))    # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow

    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

    it = pool.imap(f, range(10))
    print it.next()                       # prints "0"
    print it.next()                       # prints "1"
    print it.next(timeout=1)              # prints "4" unless your computer is *very* slow

    import time
    result = pool.applyAsync(time.sleep, (10,))
    print result.get(timeout=1)           # raises `TimeoutError`

See also ex_pool.py.
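As a further sketch (not part of the original examples, and assuming the applyAsync(), imapUnordered(), close() and join() behaviour described above), a callback and an unordered iterator might be used as follows; the names g() and on_result() are illustrative only:

from processing import Pool

def g(x):
    return x*x*x

def on_result(result):
    # called by the result-handling thread once the result is ready;
    # it should return quickly so that thread is not blocked
    print 'callback got', result

if __name__ == '__main__':
    pool = Pool(processes=2)

    r = pool.applyAsync(g, (3,), callback=on_result)
    r.wait()                              # block until the result (and callback) is done

    for y in pool.imapUnordered(g, range(5)):
        print y                           # results may arrive in any order

    pool.close()                          # no more tasks will be submitted
    pool.join()                           # wait for the worker processes to exit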

uqfoundation-multiprocess-b3457a5/py3.8/doc/pool-objects.txt000066400000000000000000000136411455552142400241140ustar00rootroot00000000000000.. include:: header.txt =============== Process Pools =============== The `processing.pool` module has one public class: **class** `Pool(processes=None, initializer=None, initargs=())` A class representing a pool of worker processes. Tasks can be offloaded to the pool and the results dealt with when they become available. Note that tasks can only be submitted (or retrieved) by the process which created the pool object. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. Pool objects ============ `Pool` has the following public methods: `__init__(processes=None)` The constructor creates and starts `processes` worker processes. If `processes` is `None` then `cpuCount()` is used to find a default or 1 if `cpuCount()` raises `NotImplemented`. `apply(func, args=(), kwds={})` Equivalent of the `apply()` builtin function. It blocks till the result is ready. `applyAsync(func, args=(), kwds={}, callback=None)` A variant of the `apply()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `map(func, iterable, chunksize=None)` A parallel equivalent of the `map()` builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting `chunksize` to a positive integer. `mapAsync(func, iterable, chunksize=None, callback=None)` A variant of the `map()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `imap(func, iterable, chunksize=1)` An equivalent of `itertools.imap()`. The `chunksize` argument is the same as the one used by the `map()` method. For very long iterables using a large value for `chunksize` can make make the job complete **much** faster than using the default value of `1`. Also if `chunksize` is `1` then the `next()` method of the iterator returned by the `imap()` method has an optional `timeout` parameter: `next(timeout)` will raise `processing.TimeoutError` if the result cannot be returned within `timeout` seconds. `imapUnordered(func, iterable, chunksize=1)` The same as `imap()` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) `close()` Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. `terminate()` Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected `terminate()` will be called immediately. 
`join()` Wait for the worker processes to exit. One must call `close()` or `terminate()` before using `join()`. Asynchronous result objects =========================== The result objects returns by `applyAsync()` and `mapAsync()` have the following public methods: `get(timeout=None)` Returns the result when it arrives. If `timeout` is not `None` and the result does not arrive within `timeout` seconds then `processing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by `get()`. `wait(timeout=None)` Waits until the result is available or until `timeout` seconds pass. `ready()` Returns whether the call has completed. `successful()` Returns whether the call completed without raising an exception. Will raise `AssertionError` if the result is not ready. Examples ======== The following example demonstrates the use of a pool:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, (10,)) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print it.next() # prints "0" print it.next() # prints "1" print it.next(timeout=1) # prints "4" unless your computer is *very* slow import time result = pool.applyAsync(time.sleep, (10,)) print result.get(timeout=1) # raises `TimeoutError` See also `ex_pool.py <../examples/ex_pool.py>`_. .. _Prev: proxy-objects.html .. _Up: processing-ref.html .. _Next: sharedctypes.html uqfoundation-multiprocess-b3457a5/py3.8/doc/process-objects.html000066400000000000000000000235741455552142400247540ustar00rootroot00000000000000 Process objects
Prev         Up         Next

Process objects

Process objects represent activity that is run in a separate process.

Process

The Process class has equivalents of all the methods of threading.Thread:

__init__(group=None, target=None, name=None, args=(), kwargs={})

This constructor should always be called with keyword arguments. Arguments are:

group
should be None; exists for compatibility with threading.Thread.
target
is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name
is the process name. By default, a unique name is constructed of the form 'Process-N1:N2:...:Nk' where N1,N2,...,Nk is a sequence of integers whose length is determined by the generation of the process.
args
is the argument tuple for the target invocation. Defaults to ().
kwargs
is a dictionary of keyword arguments for the target invocation. Defaults to {}.

If a subclass overrides the constructor, it must make sure it invokes the base class constructor (Process.__init__()) before doing anything else to the process.

run()

Method representing the process's activity.

You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.

start()

Start the process's activity.

This must be called at most once per process object. It arranges for the object's run() method to be invoked in a separate process.

join(timeout=None)

This blocks the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.

If timeout is None then there is no timeout.

A process can be joined many times.

A process cannot join itself because this would cause a deadlock.

It is an error to attempt to join a process before it has been started.

getName()
Return the process's name.
setName(name)

Set the process's name.

The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor.

isAlive()

Return whether the process is alive.

Roughly, a process object is alive from the moment the start() method returns until the child process terminates.

isDaemon()
Return the process's daemon flag.
setDaemon(daemonic)

Set the process's daemon flag to the Boolean value daemonic. This must be called before start() is called.

The initial value is inherited from the creating process.

When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes.

In addition process objects also support the following methods.

getPid()
Return the process ID. Before the process is spawned this will be None.
getExitCode()
Return the child's exit code. This will be None if the process has not yet terminated. A negative value -N indicates that the child was terminated by signal N.
getAuthKey()

Return the process's authentication key (a string).

When the processing package is initialized the main process is assigned a random hexadecimal string.

When a Process object is created it will inherit the authentication key of its parent process, although this may be changed using setAuthKey() below.

See Authentication Keys.

setAuthKey(authkey)
Set the process's authentication key which must be a string.
terminate()

Terminate the process. On Unix this is done using the SIGTERM signal and on Windows TerminateProcess() is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will not be terminated.

Warning

If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock.

Note that the start(), join(), isAlive() and getExitCode() methods should only be called by the process that created the process object.

Example

Example usage of some of the methods of Process:

>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
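
As a further sketch (not taken from the original documentation, but assuming the constructor, run(), getName() and getExitCode() behaviour described above), run() can be overridden by subclassing Process; the Greeter class is an invented name:

import processing

class Greeter(processing.Process):
    def __init__(self, who):
        # a subclass must invoke the base class constructor first
        processing.Process.__init__(self)
        self.who = who

    def run(self):
        print 'hello', self.who, 'from', self.getName()

if __name__ == '__main__':
    p = Greeter('world')
    p.start()
    p.join()
    print p.getExitCode()       # prints "0" if run() returned normally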
uqfoundation-multiprocess-b3457a5/py3.8/doc/process-objects.txt000066400000000000000000000136131455552142400246200ustar00rootroot00000000000000.. include:: header.txt ================= Process objects ================= Process objects represent activity that is run in a separate process. Process ======= The `Process` class has equivalents of all the methods of `threading.Thread`: `__init__(group=None, target=None, name=None, args=(), kwargs={})` This constructor should always be called with keyword arguments. Arguments are: `group` should be `None`; exists for compatibility with `threading.Thread`. `target` is the callable object to be invoked by the `run()` method. Defaults to None, meaning nothing is called. `name` is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. `args` is the argument tuple for the target invocation. Defaults to `()`. `kwargs` is a dictionary of keyword arguments for the target invocation. Defaults to `{}`. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (`Process.__init__()`) before doing anything else to the process. `run()` Method representing the process's activity. You may override this method in a subclass. The standard `run()` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the `args` and `kwargs` arguments, respectively. `start()` Start the process's activity. This must be called at most once per process object. It arranges for the object's `run()` method to be invoked in a separate process. `join(timeout=None)` This blocks the calling thread until the process whose `join()` method is called terminates or until the optional timeout occurs. If `timeout` is `None` then there is no timeout. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. `getName()` Return the process's name. `setName(name)` Set the process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. `isAlive()` Return whether the process is alive. Roughly, a process object is alive from the moment the `start()` method returns until the child process terminates. `isDaemon()` Return the process's daemon flag. `setDaemon(daemonic)` Set the process's daemon flag to the Boolean value `daemonic`. This must be called before `start()` is called. The initial value is inherited from the creating process. When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes. In addition process objects also support the following methods. `getPid()` Return the process ID. Before the process is spawned this will be `None`. `getExitCode()` Return the child's exit code. This will be `None` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. `getAuthKey()` Return the process's authentication key (a string). When the `processing` package is initialized the main process is assigned a random hexadecimal string. 
When a `Process` object is created it will inherit the authentication key of its parent process, although this may be changed using `setAuthKey()` below. See `Authentication Keys `_. `setAuthKey(authkey)` Set the process's authentication key which must be a string. `terminate()` Terminate the process. On Unix this is done using the `SIGTERM` signal and on Windows `TerminateProcess()` is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will *not* be terminates. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the `start()`, `join()`, `isAlive()` and `getExitCode()` methods should only be called by the process that created the process object. Example ======= Example usage of some of the methods of `Process`:: >>> import processing, time, signal >>> p = processing.Process(target=time.sleep, args=(1000,)) >>> print p, p.isAlive() False >>> p.start() >>> print p, p.isAlive() True >>> p.terminate() >>> print p, p.isAlive() False >>> p.getExitCode() == -signal.SIGTERM True .. _Prev: processing-ref.html .. _Up: processing-ref.html .. _Next: queue-objects.html uqfoundation-multiprocess-b3457a5/py3.8/doc/processing-ref.html000066400000000000000000000573611455552142400245760ustar00rootroot00000000000000 processing package reference
Prev         Up         Next

processing package reference

The processing package mostly replicates the API of the threading module.

Classes and exceptions

class Process(group=None, target=None, name=None, args=(), kwargs={})

An analogue of threading.Thread.

See Process objects.

exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Pipes and Queues

When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks.

For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers).

Note that one can also create a shared queue by using a manager object -- see Managers.

For an example of the usage of queues for interprocess communication see ex_workers.py.

Pipe(duplex=True)

Returns a pair (conn1, conn2) of connection objects representing the ends of a pipe.

If duplex is true then the pipe is two way; otherwise conn1 can only be used for receiving messages and conn2 can only be used for sending messages.

See Connection objects.

Queue(maxsize=0)

Returns a process shared queue object. The usual Empty and Full exceptions from the standard library's Queue module are raised to signal timeouts.

See Queue objects.
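
As a minimal sketch (assuming connection objects support send() and recv() for picklable objects, as described on the Connection objects page), a pipe can pass an object from a child process back to its parent:

from processing import Process, Pipe

def worker(conn):
    conn.send([42, None, 'hello'])      # any picklable object can be sent
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=worker, args=(child_conn,))
    p.start()
    print parent_conn.recv()            # prints "[42, None, 'hello']"
    p.join()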

Synchronization primitives

Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the standard library's threading module.

Note that one can also create synchronization primitives by using a manager object -- see Managers.

BoundedSemaphore(value=1)

Returns a bounded semaphore object: a clone of threading.BoundedSemaphore.

(On Mac OSX this is indistinguishable from Semaphore() because sem_getvalue() is not implemented on that platform).

Condition(lock=None)

Returns a condition variable: a clone of threading.Condition.

If lock is specified then it should be a Lock or RLock object from processing.

Event()
Returns an event object: a clone of threading.Event.
Lock()
Returns a non-recursive lock object: a clone of threading.Lock.
RLock()
Returns a recursive lock object: a clone of threading.RLock.
Semaphore(value=1)
Returns a semaphore object: a clone of threading.Semaphore.

Acquiring with a timeout

The acquire() method of BoundedSemaphore, Lock, RLock and Semaphore has a timeout parameter not supported by the equivalents in threading. The signature is acquire(block=True, timeout=None) with keyword parameters being acceptable. If block is true and timeout is not None then it specifies a timeout in seconds. If block is false then timeout is ignored.
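
For instance (a sketch assuming the signature above; the return value on timeout is not documented here and is assumed to be False, as for the multiprocessing equivalents):

from processing import Lock

lock = Lock()
lock.acquire()                          # the lock is now held

# a second acquire on a non-recursive lock would block forever,
# so give up after half a second instead
if not lock.acquire(block=True, timeout=0.5):
    print 'could not get the lock within 0.5 seconds'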

Interrupting the main thread

If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to BoundedSemaphore.acquire(), Lock.acquire(), RLock.acquire(), Semaphore.acquire(), Condition.acquire() or Condition.wait() then the call will be immediately interrupted and KeyboardInterrupt will be raised.

This differs from the behaviour of threading where SIGINT will be ignored while the equivalent blocking calls are in progress.

Shared Objects

It is possible to create shared objects using shared memory which can be inherited by child processes.

Value(typecode_or_type, *args, **, lock=True)

Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Array(typecode_or_type, size_or_initializer, **, lock=True)

Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library.

See also sharedctypes.
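
A short sketch (assuming the Value() and Array() signatures above, and that the synchronized wrappers expose a value attribute and support indexing) of sharing a counter and an array with a child process:

from processing import Process, Value, Array

def work(n, a):
    n.value = 3.14159                   # update the shared double
    for i in range(len(a)):
        a[i] = -a[i]                    # negate each shared int in place

if __name__ == '__main__':
    num = Value('d', 0.0)               # shared double, lock-protected by default
    arr = Array('i', range(10))         # shared array of ints

    p = Process(target=work, args=(num, arr))
    p.start()
    p.join()

    print num.value                     # prints "3.14159"
    print arr[:]                        # prints "[0, -1, -2, ..., -9]"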

Managers

Managers provide a way to create data which can be shared between different processes.

Manager()

Returns a started SyncManager object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies.

The methods for creating shared objects are

list(), dict(), Namespace(), Value(), Array(), Lock(), RLock(), Semaphore(), BoundedSemaphore(), Condition(), Event(), Queue().

See SyncManager.

It is possible to create managers which support other types -- see Customized managers.
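
For example (a sketch using the dict() and list() creation methods listed above), a managed dict and list can be shared with a child process via proxies:

from processing import Process, Manager

def work(d, l):
    d['x'] = 1
    d['y'] = 2
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=work, args=(d, l))
    p.start()
    p.join()

    print d                             # shows the two entries set by the child
    print l                             # prints "[9, 8, 7, ..., 0]"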

Process Pools

One can create a pool of processes which will carry out tasks submitted to it.

Pool(processes=None, initializer=None, initargs=())

Returns a process pool object which controls a pool of worker processes to which jobs can be submitted.

It supports asynchronous results with timeouts and callbacks and has a parallel map implementation.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

See Pool objects.

Logging

Some support for logging is available. Note, however, that the logging package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up.

enableLogging(level, HandlerType=None, handlerArgs=(), format=None)

Enables logging and sets the debug level used by the package's logger to level. See documentation for the logging module in the standard library.

If HandlerType is specified then a handler is created using HandlerType(*handlerArgs) and this will be used by the logger -- any previous handlers will be discarded. If format is specified then this will be used for the handler; otherwise format defaults to '[%(levelname)s/%(processName)s] %(message)s'. (The logger used by processing allows use of the non-standard '%(processName)s' format.)

If HandlerType is not specified and the logger has no handlers then a default one is created which prints to sys.stderr.

Note: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call enableLogging() with the same arguments which were used when its parent process last called enableLogging() (if it ever did).

getLogger()
Returns the logger used by processing. If enableLogging() has not yet been called then None is returned.

Below is an example session with logging turned on:

>>> import processing, logging
>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] manager exiting with exitcode 0

Miscellaneous

activeChildren()

Return list of all live children of the current process.

Calling this has the side effect of "joining" any processes which have already finished.

cpuCount()
Returns the number of CPUs in the system. May raise NotImplementedError.
currentProcess()

An analogue of threading.current_thread().

Returns the object corresponding to the current process.

freezeSupport()

Adds support for when a program which uses the processing package has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example

from processing import Process, freezeSupport

def f():
    print 'hello world!'

if __name__ == '__main__':
    freezeSupport()
    Process(target=f).start()

If the freezeSupport() line is missed out then trying to run the frozen executable will raise RuntimeError.

If the module is being run normally by the python interpreter then freezeSupport() has no effect.

Note

  • The processing.dummy package replicates the API of processing but is no more than a wrapper around the threading module.
  • processing contains no analogues of activeCount, enumerate, settrace, setprofile, Timer, or local from the threading module.
uqfoundation-multiprocess-b3457a5/py3.8/doc/processing-ref.txt000066400000000000000000000310141455552142400244340ustar00rootroot00000000000000.. include:: header.txt ============================== processing package reference ============================== The `processing` package mostly replicates the API of the `threading` module. Classes and exceptions ---------------------- **class** `Process(group=None, target=None, name=None, args=(), kwargs={})` An analogue of `threading.Thread`. See `Process objects`_. **exception** `BufferTooShort` Exception raised by the `recvBytesInto()` method of a `connection object `_ when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Pipes and Queues ---------------- When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers). Note that one can also create a shared queue by using a manager object -- see `Managers`_. For an example of the usage of queues for interprocess communication see `ex_workers.py <../examples/ex_workers.py>`_. `Pipe(duplex=True)` Returns a pair `(conn1, conn2)` of connection objects representing the ends of a pipe. If `duplex` is true then the pipe is two way; otherwise `conn1` can only be used for receiving messages and `conn2` can only be used for sending messages. See `Connection objects `_. `Queue(maxsize=0)` Returns a process shared queue object. The usual `Empty` and `Full` exceptions from the standard library's `Queue` module are raised to signal timeouts. See `Queue objects `_. Synchronization primitives -------------------------- Generally synchronization primitives are not as necessary in a multiprocess program as they are in a mulithreaded program. See the documentation for the standard library's `threading` module. Note that one can also create synchronization primitves by using a manager object -- see `Managers`_. `BoundedSemaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.BoundedSemaphore`. (On Mac OSX this is indistiguishable from `Semaphore()` because `sem_getvalue()` is not implemented on that platform). `Condition(lock=None)` Returns a condition variable: a clone of `threading.Condition`. If `lock` is specified then it should be a `Lock` or `RLock` object from `processing`. `Event()` Returns an event object: a clone of `threading.Event`. `Lock()` Returns a non-recursive lock object: a clone of `threading.Lock`. `RLock()` Returns a recursive lock object: a clone of `threading.RLock`. `Semaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.Semaphore`. .. admonition:: Acquiring with a timeout The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and `Semaphore` has a timeout parameter not supported by the equivalents in `threading`. The signature is `acquire(block=True, timeout=None)` with keyword parameters being acceptable. If `block` is true and `timeout` is not `None` then it specifies a timeout in seconds. If `block` is false then `timeout` is ignored. .. 
admonition:: Interrupting the main thread If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to `BoundedSemaphore.acquire()`, `Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()`, `Condition.acquire()` or `Condition.wait()` then the call will be immediately interrupted and `KeyboardInterrupt` will be raised. This differs from the behaviour of `threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared Objects -------------- It is possible to create shared objects using shared memory which can be inherited by child processes. `Value(typecode_or_type, *args, **, lock=True)` Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Array(typecode_or_type, size_or_initializer, **, lock=True)` Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library. See also `sharedctypes `_. Managers -------- Managers provide a way to create data which can be shared between different processes. `Manager()` Returns a started `SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. The methods for creating shared objects are `list()`, `dict()`, `Namespace()`, `Value()`, `Array()`, `Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`. See `SyncManager `_. It is possible to create managers which support other types -- see `Customized managers `_. Process Pools ------------- One can create a pool of processes which will carry out tasks submitted to it. 
`Pool(processes=None, initializer=None, initargs=())` Returns a process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. See `Pool objects `_. Logging ------- Some support for logging is available. Note, however, that the `logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. `enableLogging(level, HandlerType=None, handlerArgs=(), format=None)` Enables logging and sets the debug level used by the package's logger to `level`. See documentation for the `logging` module in the standard library. If `HandlerType` is specified then a handler is created using `HandlerType(*handlerArgs)` and this will be used by the logger -- any previous handlers will be discarded. If `format` is specified then this will be used for the handler; otherwise `format` defaults to `'[%(levelname)s/%(processName)s] %(message)s'`. (The logger used by `processing` allows use of the non-standard `'%(processName)s'` format.) If `HandlerType` is not specified and the logger has no handlers then a default one is created which prints to `sys.stderr`. *Note*: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call `enableLogging()` with the same arguments which were used when its parent process last called `enableLogging()` (if it ever did). `getLogger()` Returns the logger used by `processing`. If `enableLogging()` has not yet been called then `None` is returned. Below is an example session with logging turned on:: >>> import processing, logging >>> processing.enableLogging(level=logging.INFO) >>> processing.getLogger().warning('doomed') [WARNING/MainProcess] doomed >>> m = processing.Manager() [INFO/SyncManager-1] child process calling self.run() [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-1] manager received shutdown message [INFO/SyncManager-1] manager exiting with exitcode 0 Miscellaneous ------------- `activeChildren()` Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. `cpuCount()` Returns the number of CPUs in the system. May raise `NotImplementedError`. `currentProcess()` An analogue of `threading.current_thread()`. Returns the object corresponding to the current process. `freezeSupport()` Adds support for when a program which uses the `processing` package has been frozen to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) One needs to call this function straight after the `if __name__ == '__main__'` line of the main module. For example :: from processing import Process, freezeSupport def f(): print 'hello world!' if __name__ == '__main__': freezeSupport() Process(target=f).start() If the `freezeSupport()` line is missed out then trying to run the frozen executable will raise `RuntimeError`. If the module is being run normally by the python interpreter then `freezeSupport()` has no effect. .. 
note:: * The `processing.dummy` package replicates the API of `processing` but is no more than a wrapper around the `threading` module. * `processing` contains no analogues of `activeCount`, `enumerate`, `settrace`, `setprofile`, `Timer`, or `local` from the `threading` module. Subsections ----------- + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes object `_ + `Listeners and Clients `_ .. _Prev: intro.html .. _Up: index.html .. _Next: process-objects.html uqfoundation-multiprocess-b3457a5/py3.8/doc/programming-guidelines.html000066400000000000000000000214551455552142400263130ustar00rootroot00000000000000 Programming guidelines
Prev         Up         Next

Programming guidelines

There are certain guidelines and idioms which should be adhered to when using the processing package.

All platforms

Avoid shared state

As far as possible one should try to avoid shifting large amounts of data between processes.

It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the threading module.

Picklability:
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies:

Do not use a proxy object from more than one thread unless you protect it with a lock.

(There is never a problem with different processes using the 'same' proxy.)

Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or activeChildren() is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's isAlive() will join the process. Even so it is probably good practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many of the types from the processing package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes

Using the terminate() method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes.

Therefore it is probably best to only consider using terminate() on processes which never use any shared resources.

Joining processes that use queues

Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the cancelJoin() method of the queue to avoid this behaviour.)

This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will automatically be joined.

An example which will deadlock is the following:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # this deadlocks
    obj = queue.get()

A fix here would be to swap the last two lines round (or simply remove the p.join() line).
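
For clarity, the fixed version (a sketch applying the suggested swap) looks like this:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    obj = queue.get()           # drain the queue first ...
    p.join()                    # ... then joining the child is safe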

Explicitly pass resources to child processes

On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process.

Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process.

So for instance

from processing import Process, Lock

def f():
    ... do something using "lock" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f).start()

should be rewritten as

from processing import Process, Lock

def f(l):
    ... do something using "l" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f, args=(lock,)).start()

Windows

Since Windows lacks os.fork() it has a few extra restrictions:

More picklability:

Ensure that all arguments to Process.__init__() are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the target argument on Windows --- just define a function and use that instead.

Also, if you subclass Process then make sure that instances will be picklable when the start() method is called.

Global variables:

Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that start() was called.

However, global variables which are just module level constants cause no problems.

Safe importing of main module:

Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).

For example, under Windows running the following module would fail with a RuntimeError:

from processing import Process

def foo():
    print 'hello'

p = Process(target=foo)
p.start()

Instead one should protect the "entry point" of the program by using if __name__ == '__main__': as follows:

from processing import Process

def foo():
    print 'hello'

if __name__ == '__main__':
    freezeSupport()
    p = Process(target=foo)
    p.start()

(The freezeSupport() line can be omitted if the program will be run normally instead of frozen.)

This allows the newly spawned Python interpreter to safely import the module and then run the module's foo() function.

Similar restrictions apply if a pool or manager is created in the main module.

uqfoundation-multiprocess-b3457a5/py3.8/doc/programming-guidelines.txt000066400000000000000000000150221455552142400261570ustar00rootroot00000000000000.. include:: header.txt ======================== Programming guidelines ======================== There are certain guidelines and idioms which should be adhered to when using the `processing` package. All platforms ------------- *Avoid shared state* As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the `threading` module. *Picklability*: Ensure that the arguments to the methods of proxies are picklable. *Thread safety of proxies*: Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the 'same' proxy.) *Joining zombie processes* On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or `activeChildren()` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's `isAlive()` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. *Better to inherit than pickle/unpickle* On Windows many of types from the `processing` package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which need access to a shared resource created elsewhere can inherit it from an ancestor process. *Avoid terminating processes* Using the `terminate()` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using `terminate()` on processes which never use any shared resources. *Joining processes that use queues* Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the `cancelJoin()` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from processing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the `p.join()` line). *Explicity pass resources to child processes* On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. 
Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from processing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from processing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Windows ------- Since Windows lacks `os.fork()` it has a few extra restrictions: *More picklability*: Ensure that all arguments to `Process.__init__()` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the `target` argument on Windows --- just define a function and use that instead. Also, if you subclass `Process` then make sure that instances will be picklable when the `start()` method is called. *Global variables*: Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that `start()` was called. However, global variables which are just module level constants cause no problems. *Safe importing of main module*: Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a `RuntimeError`:: from processing import Process def foo(): print 'hello' p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using `if __name__ == '__main__':` as follows:: from processing import Process def foo(): print 'hello' if __name__ == '__main__': freezeSupport() p = Process(target=foo) p.start() (The `freezeSupport()` line can be ommitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's `foo()` function. Similar restrictions apply if a pool or manager is created in the main module. .. _Prev: connection-ref.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.8/doc/proxy-objects.html000066400000000000000000000175771455552142400244650ustar00rootroot00000000000000 Proxy objects
Prev         Up         Next

Proxy objects

A proxy is an object which refers to a shared object which lives (presumably) in a different process. The shared object is said to be the referent of the proxy. Multiple proxy objects may have the same referent.

A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can:

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<Proxy[list] object at 0x00DFA230>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
>>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
True

Notice that applying str() to a proxy will return the representation of the referent, whereas applying repr() will return the representation of the proxy.

An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:

>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b)         # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']

Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the for statement:

>>> a = manager.dict([(i*i, i) for i in range(10)])
>>> for key in a:
...     print '<%r,%r>' % (key, a[key]),
...
<0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6>

Note

Although list and dict proxy objects are iterable, it will be much more efficient to iterate over a copy of the referent, for example

for item in some_list[:]:
    ...

and

for key in some_dict.keys():
    ...

Methods of BaseProxy

Proxy objects are instances of subclasses of BaseProxy. The only semi-public methods of BaseProxy are the following:

_callMethod(methodname, args=(), kwds={})

Call and return the result of a method of the proxy's referent.

If proxy is a proxy whose referent is obj then the expression

proxy._callMethod(methodname, args, kwds)

will evaluate the expression

getattr(obj, methodname)(*args, **kwds)         (*)

in the manager's process.

The returned value will be either a copy of the result of (*) or if the result is an unpicklable iterator then a proxy for the iterator.

If an exception is raised by (*) then it is re-raised by _callMethod(). If some other exception is raised in the manager's process then this is converted into a RemoteError exception and is raised by _callMethod().

Note in particular that an exception will be raised if methodname has not been exposed --- see the exposed argument to CreatorMethod.

_getValue()

Return a copy of the referent.

If the referent is unpicklable then this will raise an exception.

__repr__
Return a representation of the proxy object.
__str__
Return the representation of the referent.

Cleanup

A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent.

A shared object gets deleted from the manager process when there are no longer any proxies referring to it.

Examples

An example of the usage of _callMethod():

>>> l = manager.list(range(10))
>>> l._callMethod('__getslice__', (2, 7))   # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._callMethod('__iter__')               # equiv to `iter(l)`
<Proxy[iter] object at 0x00DFAFF0>
>>> l._callMethod('__getitem__', (20,))     # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range

As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:

class IteratorProxy(BaseProxy):
    def __iter__(self):
        return self
    def next(self):
        return self._callMethod('next')
uqfoundation-multiprocess-b3457a5/py3.8/doc/proxy-objects.txt000066400000000000000000000115571455552142400243300ustar00rootroot00000000000000.. include:: header.txt =============== Proxy objects =============== A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that the its referent can:: >>> from processing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print l [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print repr(l) >>> l[4] 16 >>> l[2:5] [4, 9, 16] >>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] True Notice that applying `str()` to a proxy will return the representation of the referent, whereas applying `repr()` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of `a` now contains referent of `b` >>> print a, b [[]] [] >>> b.append('hello') >>> print a, b [['hello']] ['hello'] Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the `for` statement:: >>> a = manager.dict([(i*i, i) for i in range(10)]) >>> for key in a: ... print '<%r,%r>' % (key, a[key]), ... <0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6> .. note:: Although `list` and `dict` proxy objects are iterable, it will be much more efficient to iterate over a *copy* of the referent, for example :: for item in some_list[:]: ... and :: for key in some_dict.keys(): ... Methods of `BaseProxy` ====================== Proxy objects are instances of subclasses of `BaseProxy`. The only semi-public methods of `BaseProxy` are the following: `_callMethod(methodname, args=(), kwds={})` Call and return the result of a method of the proxy's referent. If `proxy` is a proxy whose referent is `obj` then the expression `proxy._callMethod(methodname, args, kwds)` will evaluate the expression `getattr(obj, methodname)(*args, **kwds)` |spaces| _`(*)` in the manager's process. The returned value will be either a copy of the result of `(*)`_ or if the result is an unpicklable iterator then a proxy for the iterator. If an exception is raised by `(*)`_ then then is re-raised by `_callMethod()`. If some other exception is raised in the manager's process then this is converted into a `RemoteError` exception and is raised by `_callMethod()`. Note in particular that an exception will be raised if `methodname` has not been *exposed* --- see the `exposed` argument to `CreatorMethod `_. `_getValue()` Return a copy of the referent. If the referent is unpicklable then this will raise an exception. `__repr__` Return a representation of the proxy object. `__str__` Return the representation of the referent. Cleanup ======= A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. 
A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Examples ======== An example of the usage of `_callMethod()`:: >>> l = manager.list(range(10)) >>> l._callMethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callMethod('__iter__') # equiv to `iter(l)` >>> l._callMethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:: class IteratorProxy(BaseProxy): def __iter__(self): return self def next(self): return self._callMethod('next') .. _Prev: manager-objects.html .. _Up: processing-ref.html .. _Next: pool-objects.html uqfoundation-multiprocess-b3457a5/py3.8/doc/queue-objects.html000066400000000000000000000227101455552142400244110ustar00rootroot00000000000000 Queue objects
Prev         Up         Next

Queue objects

The queue type provided by processing is a multi-producer, multi-consumer FIFO queue modelled on the Queue.Queue class in the standard library.

Queue(maxsize=0)

Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe.

processing.Queue implements all the methods of Queue.Queue except for qsize(), task_done() and join().

empty()
Return True if the queue is empty, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
full()
Return True if the queue is full, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
put(item, block=True, timeout=None)
Put item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).
put_nowait(item), putNoWait(item)
Equivalent to put(item, False).
get(block=True, timeout=None)
Remove and return an item from the queue. If optional args block is true and timeout is None (the default), block if necessary until an item is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the Empty exception (timeout is ignored in that case).
get_nowait(), getNoWait()
Equivalent to get(False).

processing.Queue has a few additional methods not found in Queue.Queue which are usually unnecessary:

putMany(iterable)
If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So q.putMany(X) is a faster alternative to for x in X: q.put(x). Raises an error if the queue has finite size.
close()
Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected.
joinThread()

This joins the background thread and can only be used after close() has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe.

By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call cancelJoin() to prevent this behaviour.

cancelJoin()
Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue.

Empty and Full

processing uses the usual Queue.Empty and Queue.Full exceptions to signal a timeout. They are not available in the processing namespace so you need to import them from Queue.
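
For illustration, a minimal sketch of handling the timeout, written against the multiprocess package shipped in this repository (so, under Python 3, the exception is imported from the queue module rather than Queue); the worker function is only an example:

from multiprocess import Process, Queue
from queue import Empty        # Queue.Empty in the Python 2 spelling used above

def worker(q):
    q.put('finished')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    try:
        print(q.get(timeout=1))    # raises Empty if nothing arrives within 1 second
    except Empty:
        print('timed out waiting for a result')
    p.join()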

Warning

If a process is killed using the terminate() method or os.kill() while it is trying to use a Queue then the data in the queue is likely to become corrupted. This may cause any other process to get an exception when it tries to use the queue later on.

Warning

As mentioned above, if a child process has put items on a queue (and it has not used cancelJoin()) then that process will not terminate until all buffered items have been flushed to the pipe.

This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children.

Note that a queue created using a manager does not have this issue. See Programming Guidelines.
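
As a rough sketch of the safe ordering (again using the multiprocess names shipped in this repository; the producer function and payload sizes are only illustrative), the parent consumes every item before joining, so the child's feeder thread can always finish flushing:

from multiprocess import Process, Queue

def producer(q):
    for i in range(1000):
        q.put(b'x' * 10000)      # far more data than the pipe can buffer

if __name__ == '__main__':
    q = Queue()
    p = Process(target=producer, args=(q,))
    p.start()
    items = [q.get() for _ in range(1000)]   # consume everything first...
    p.join()                                 # ...so this join cannot deadlock
    print(len(items))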

uqfoundation-multiprocess-b3457a5/py3.8/doc/queue-objects.txt000066400000000000000000000121211455552142400242570ustar00rootroot00000000000000.. include:: header.txt =============== Queue objects =============== The queue type provided by `processing` is a multi-producer, multi-consumer FIFO queue modelled on the `Queue.Queue` class in the standard library. `Queue(maxsize=0)` Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. `Queue.Queue` implements all the methods of `Queue.Queue` except for `qsize()`, `task_done()` and `join()`. `empty()` Return `True` if the queue is empty, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `full()` Return `True` if the queue is full, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `put(item, block=True, timeout=None)` Put item into the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Full` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the `Full` exception (`timeout` is ignored in that case). `put_nowait(item)`, `putNoWait(item)` Equivalent to `put(item, False)`. `get(block=True, timeout=None)` Remove and return an item from the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Empty` exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the `Empty` exception (`timeout` is ignored in that case). `get_nowait()`, `getNoWait()` Equivalent to `get(False)`. `processing.Queue` has a few additional methods not found in `Queue.Queue` which are usually unnecessary: `putMany(iterable)` If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So `q.putMany(X)` is a faster alternative to `for x in X: q.put(x)`. Raises an error if the queue has finite size. `close()` Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. `joinThread()` This joins the background thread and can only be used after `close()` has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call `cancelJoin()` to prevent this behaviour. `cancelJoin()` Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue. .. admonition:: `Empty` and `Full` `processing` uses the usual `Queue.Empty` and `Queue.Full` exceptions to signal a timeout. They are not available in the `processing` namespace so you need to import them from `Queue`. .. 
warning:: If a process is killed using the `terminate()` method or `os.kill()` while it is trying to use a `Queue` then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used `cancelJoin()`) then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all it non-daemonic children. Note that a queue created using a manager does not have this issue. See `Programming Guidelines `_. .. _Prev: process-objects.html .. _Up: processing-ref.html .. _Next: connection-objects.html uqfoundation-multiprocess-b3457a5/py3.8/doc/sharedctypes.html000066400000000000000000000241571455552142400243430ustar00rootroot00000000000000 Shared ctypes objects
Prev         Up         Next

Shared ctypes objects

The processing.sharedctypes module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the ctypes package.)

The functions in the module are

RawArray(typecode_or_type, size_or_initializer)

Returns a ctypes array allocated from shared memory.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock.
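
As a minimal sketch (using the multiprocess.sharedctypes module shipped in this repository; the function and variable names are only illustrative), a RawArray created in the parent can be handed to a child at creation time and modified in place:

from multiprocess import Process
from multiprocess.sharedctypes import RawArray

def square(arr):
    # runs in the child and writes directly into the shared memory block
    for i in range(len(arr)):
        arr[i] = i * i

if __name__ == '__main__':
    arr = RawArray('i', 10)        # ten zero-initialized C ints in shared memory
    p = Process(target=square, args=(arr,))
    p.start()
    p.join()
    print(list(arr))               # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]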

RawValue(typecode_or_type, *args)

Returns a ctypes object allocated from shared memory.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see documentation for ctypes.

Array(typecode_or_type, size_or_initializer, **, lock=True)

The same as RawArray() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Value(typecode_or_type, *args, **, lock=True)

The same as RawValue() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes object.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

copy(obj)
Returns a ctypes object allocated from shared memory which is a copy of the ctypes object obj.
synchronized(obj, lock=None)

Returns a process-safe wrapper object for a ctypes object which uses lock to synchronize access. If lock is None then a processing.RLock object is created automatically.

A synchronized wrapper will have two methods in addition to those of the object it wraps: getobj() returns the wrapped object and getlock() returns the lock object used for synchronization.

Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object.
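
For illustration, a small sketch of synchronized() written against the multiprocess package shipped in this repository, where the two extra wrapper methods are spelled get_obj() and get_lock(); the counter example itself is hypothetical:

from multiprocess import Process
from multiprocess.sharedctypes import RawValue, synchronized

def bump(counter, n):
    for _ in range(n):
        with counter.get_lock():      # the lock used to synchronize access
            counter.value += 1        # delegated to the wrapped c_int

if __name__ == '__main__':
    counter = synchronized(RawValue('i', 0))   # an RLock is created automatically
    workers = [Process(target=bump, args=(counter, 1000)) for _ in range(4)]
    for p in workers: p.start()
    for p in workers: p.join()
    print(counter.get_obj().value)             # 4000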

Equivalences

The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table MyStruct is some subclass of ctypes.Structure.)

==================== ========================== ===========================
ctypes               sharedctypes using type    sharedctypes using typecode
==================== ========================== ===========================
c_double(2.4)        RawValue(c_double, 2.4)    RawValue('d', 2.4)
MyStruct(4, 6)       RawValue(MyStruct, 4, 6)
(c_short * 7)()      RawArray(c_short, 7)       RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))
==================== ========================== ===========================

Example

Below is an example where a number of ctypes objects are modified by a child process

from processing import Process, Lock
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', 'hello world', lock=lock)
    A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print n.value
    print x.value
    print s.value
    print [(p.x, p.y) for p in A]

The results printed are

49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]

Avoid sharing pointers

Although it is possible to store a pointer in shared memory, remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash.

uqfoundation-multiprocess-b3457a5/py3.8/doc/sharedctypes.txt000066400000000000000000000143071455552142400242120ustar00rootroot00000000000000.. include:: header.txt ======================== Shared ctypes objects ======================== The `processing.sharedctypes` module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the `ctypes` package.) The functions in the module are `RawArray(typecode_or_type, size_or_initializer)` Returns a ctypes array allocated from shared memory. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock. `RawValue(typecode_or_type, *args)` Returns a ctypes object allocated from shared memory. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see documentation for `ctypes`. `Array(typecode_or_type, size_or_initializer, **, lock=True)` The same as `RawArray()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Value(typecode_or_type, *args, **, lock=True)` The same as `RawValue()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `copy(obj)` Returns a ctypes object allocated from shared memory which is a copy of the ctypes object `obj`. `synchronized(obj, lock=None)` Returns a process-safe wrapper object for a ctypes object which uses `lock` to synchronize access. If `lock` is `None` then a `processing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: `getobj()` returns the wrapped object and `getlock()` returns the lock object used for synchronization. 
Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. Equivalences ============ The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table `MyStruct` is some subclass of `ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Example ======= Below is an example where a number of ctypes objects are modified by a child process :: from processing import Process, Lock from processing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for p in A: p.x **= 2 p.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(ctypes.c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print n.value print x.value print s.value print [(p.x, p.y) for p in A] The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. admonition:: Avoid sharing pointers Although it is posible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. _Prev: pool-objects.html .. _Up: processing-ref.html .. _Next: connection-ref.html uqfoundation-multiprocess-b3457a5/py3.8/doc/tests.html000066400000000000000000000060761455552142400230070ustar00rootroot00000000000000 Tests and Examples
Prev         Up         Next

Tests and Examples

processing contains a test sub-package which contains unit tests for the package. You can do a test run by doing

python -m processing.tests

on Python 2.5 or

python -c "from processing.tests import main; main()"

on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager.

The example sub-package contains the following modules:

ex_newtype.py
Demonstration of how to create and use customized managers and proxies.
ex_pool.py
Test of the Pool class which represents a process pool.
ex_synchronize.py
Test of synchronization types like locks, conditions and queues.
ex_workers.py
A test showing how to use queues to feed tasks to a collection of worker processes and collect the results.
ex_webserver.py
An example of how a pool of worker processes can each run a SimpleHTTPServer.HttpServer instance while sharing a single listening socket.
benchmarks.py
Some simple benchmarks comparing processing with threading.
uqfoundation-multiprocess-b3457a5/py3.8/doc/tests.txt000066400000000000000000000027331455552142400226560ustar00rootroot00000000000000.. include:: header.txt Tests and Examples ================== `processing` contains a `test` sub-package which contains unit tests for the package. You can do a test run by doing :: python -m processing.tests on Python 2.5 or :: python -c "from processing.tests import main; main()" on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager. The `example` sub-package contains the following modules: `ex_newtype.py <../examples/ex_newtype.py>`_ Demonstration of how to create and use customized managers and proxies. `ex_pool.py <../examples/ex_pool.py>`_ Test of the `Pool` class which represents a process pool. `ex_synchronize.py <../examples/ex_synchronize.py>`_ Test of synchronization types like locks, conditions and queues. `ex_workers.py <../examples/ex_workers.py>`_ A test showing how to use queues to feed tasks to a collection of worker process and collect the results. `ex_webserver.py <../examples/ex_webserver.py>`_ An example of how a pool of worker processes can each run a `SimpleHTTPServer.HttpServer` instance while sharing a single listening socket. `benchmarks.py <../examples/benchmarks.py>`_ Some simple benchmarks comparing `processing` with `threading`. .. _Prev: programming-guidelines.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.8/doc/version.txt000066400000000000000000000000341455552142400231710ustar00rootroot00000000000000.. |version| replace:: 0.52 uqfoundation-multiprocess-b3457a5/py3.8/examples/000077500000000000000000000000001455552142400220175ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/examples/__init__.py000066400000000000000000000000001455552142400241160ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/examples/benchmarks.py000066400000000000000000000131321455552142400245060ustar00rootroot00000000000000# # Simple benchmarks for the processing package # import time, sys, multiprocess as processing, threading, queue as Queue, gc processing.freezeSupport = processing.freeze_support if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in range(iterations): q.put(a) # q.putMany((a for i in range(iterations)) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print(iterations, 'objects passed through the queue in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 cond.acquire() cond.notify() cond.release() for i in range(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = processing.Pipe() cond = processing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = processing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print(iterations, 'objects passed through connection in',elapsed,'seconds') print('average number/sec:', 
iterations/elapsed) #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): a = seq[5] elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): l.acquire() l.release() elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in range(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in range(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print(iterations * 2, 'waits in', elapsed, 'seconds') print('average number/sec:', iterations * 2 / elapsed) #### def test(): manager = processing.Manager() gc.disable() print('\n\t######## testing Queue.Queue\n') test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print('\n\t######## testing processing.Queue\n') test_queuespeed(processing.Process, processing.Queue(), processing.Condition()) print('\n\t######## testing Queue managed by server process\n') test_queuespeed(processing.Process, manager.Queue(), manager.Condition()) print('\n\t######## testing processing.Pipe\n') test_pipespeed() print print('\n\t######## testing list\n') test_seqspeed(range(10)) print('\n\t######## testing list managed by server process\n') test_seqspeed(manager.list(range(10))) print('\n\t######## testing Array("i", ..., lock=False)\n') test_seqspeed(processing.Array('i', range(10), lock=False)) print('\n\t######## testing Array("i", ..., lock=True)\n') test_seqspeed(processing.Array('i', range(10), lock=True)) print() print('\n\t######## testing threading.Lock\n') test_lockspeed(threading.Lock()) print('\n\t######## testing threading.RLock\n') test_lockspeed(threading.RLock()) print('\n\t######## testing processing.Lock\n') test_lockspeed(processing.Lock()) print('\n\t######## testing processing.RLock\n') test_lockspeed(processing.RLock()) print('\n\t######## testing lock managed by server process\n') test_lockspeed(manager.Lock()) print('\n\t######## testing rlock managed by server process\n') test_lockspeed(manager.RLock()) print() print('\n\t######## testing threading.Condition\n') test_conditionspeed(threading.Thread, threading.Condition()) print('\n\t######## testing processing.Condition\n') test_conditionspeed(processing.Process, processing.Condition()) print('\n\t######## testing condition managed by a server process\n') test_conditionspeed(processing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': processing.freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.8/examples/ex_newtype.py000066400000000000000000000030731455552142400245630ustar00rootroot00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. 
# from multiprocess import freeze_support as freezeSupport from multiprocess.managers import BaseManager, IteratorProxy as BaseProxy ## class Foo(object): def f(self): print('you called Foo.f()') def g(self): print('you called Foo.g()') def _h(self): print('you called Foo._h()') # A simple generator function def baz(): for i in range(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): def __iter__(self): return self def __next__(self): return self._callmethod('__next__') ## class MyManager(BaseManager): pass # register the Foo class; make all public methods accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make only `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) ## def test(): manager = MyManager() manager.start() print('-' * 20) f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') print('-' * 20) f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') print('-' * 20) it = manager.baz() for i in it: print('<%d>' % i, end=' ') print() ## if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.8/examples/ex_pool.py000066400000000000000000000155061455552142400240450ustar00rootroot00000000000000# # A test of `processing.Pool` class # from multiprocess import Pool, TimeoutError from multiprocess import cpu_count as cpuCount, current_process as currentProcess, freeze_support as freezeSupport, active_children as activeChildren import time, random, sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print('cpuCount() = %d\n' % cpuCount()) # # Create pool # PROCESSES = 4 print('Creating pool with %d processes\n' % PROCESSES) pool = Pool(PROCESSES) # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print('Ordered results using pool.apply_async():') for r in results: print('\t', r.get()) print() print('Ordered results using pool.imap():') for x in imap_it: print('\t', x) print() print('Unordered results using pool.imap_unordered():') for x in imap_unordered_it: print('\t', x) print() print('Ordered results using pool.map() --- will block till complete:') for x in pool.map(calculatestar, TASKS): print('\t', x) print() # # Simple benchmarks # N = 100000 print('def pow3(x): return x**3') t = time.time() A = list(map(pow3, range(N))) print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() B = pool.map(pow3, range(N)) print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() C = list(pool.imap(pow3, range(N), chunksize=N//8)) print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() L = [None] * 1000000 print('def noop(x): pass') print('L = [None] * 1000000') t = 
time.time() A = list(map(noop, L)) print('\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() B = pool.map(noop, L) print('\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() del A, B, C, L # # Test error handling # print('Testing error handling:') try: print(pool.apply(f, (5,))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.apply()') else: raise AssertionError('expected ZeroDivisionError') try: print(pool.map(f, range(10))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.map()') else: raise AssertionError('expected ZeroDivisionError') try: print(list(pool.imap(f, range(10)))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from list(pool.imap())') else: raise AssertionError('expected ZeroDivisionError') it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError('expected ZeroDivisionError') assert i == 9 print('\tGot ZeroDivisionError as expected from IMapIterator.next()') print() # # Testing timeouts # print('Testing ApplyResult.get() with timeout:', end='') res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except TimeoutError: sys.stdout.write('.') print() print() print('Testing IMapIterator.next() with timeout:', end='') it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except TimeoutError: sys.stdout.write('.') print() print() # # Testing callback # print('Testing callback:') A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print('\tcallbacks succeeded\n') else: print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print('Testing close():') for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print('\tclose() succeeded\n') # # Check terminate() method # print('Testing terminate():') pool = Pool(2) ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print('\tterminate() succeeded\n') # # Check garbage collection # print('Testing garbage collection:') pool = Pool(2) processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] del results, pool time.sleep(0.2) for worker in processes: assert not worker.is_alive() print('\tgarbage collection succeeded\n') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.8/examples/ex_synchronize.py000066400000000000000000000144041455552142400254430ustar00rootroot00000000000000# # A test file for the `processing` package # import time, sys, random from queue import Empty import multiprocess as processing # may get 
overwritten processing.currentProcess = processing.current_process processing.freezeSupport = processing.freeze_support processing.activeChildren = processing.active_children #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print('\n\t\t\t' + str(processing.currentProcess()) + ' has finished') running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = processing.Value('i', TASKS) mutex = processing.Lock() for i in range(TASKS): processing.Process(target=value_func, args=(running, mutex)).start() while running.value > 0: time.sleep(0.08) mutex.acquire() print(running.value, end=' ') sys.stdout.flush() mutex.release() print() print('No more running processes') #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = processing.Queue() p = processing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print(o, end=' ') sys.stdout.flush() except Empty: print('TIMEOUT') print() #### TEST_CONDITION def condition_func(cond): cond.acquire() print('\t' + str(cond)) time.sleep(2) print('\tchild is notifying') print('\t' + str(cond)) cond.notify() cond.release() def test_condition(): cond = processing.Condition() p = processing.Process(target=condition_func, args=(cond,)) print(cond) cond.acquire() print(cond) cond.acquire() print(cond) p.start() print('main is waiting') cond.wait() print('main has woken up') print(cond) cond.release() print(cond) cond.release() p.join() print(cond) #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print(running.value, 'tasks are running') mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print('%s has finished' % processing.currentProcess()) mutex.release() sema.release() def test_semaphore(): sema = processing.Semaphore(3) mutex = processing.RLock() running = processing.Value('i', 0) processes = [ processing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print('\tchild sleeping') time.sleep(5.5) print('\n\tchild terminating') def test_join_timeout(): p = processing.Process(target=join_timeout_func) p.start() print('waiting for process to finish') while 1: p.join(timeout=1) if not p.is_alive(): break print('.', end=' ') sys.stdout.flush() #### TEST_EVENT def event_func(event): print('\t%r is waiting' % processing.currentProcess()) event.wait() print('\t%r has woken up' % processing.currentProcess()) def test_event(): event = processing.Event() processes = [processing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print('main is sleeping') time.sleep(2) print('main is setting event') event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert list(a) == sa print('Tests passed') def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [processing.Value(id, v) for id, v in values] shared_arrays = 
[processing.Array(id, a) for id, a in arrays] p = processing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print('\n\t######## %s\n' % func.__name__) func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print(info) raise ValueError('there should be no positive refcounts left') if __name__ == '__main__': processing.freezeSupport() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print(' Using processes '.center(79, '-')) namespace = processing elif sys.argv[1] == 'manager': print(' Using processes and a manager '.center(79, '-')) namespace = processing.Manager() namespace.Process = processing.Process namespace.currentProcess = processing.currentProcess namespace.activeChildren = processing.activeChildren elif sys.argv[1] == 'threads': print(' Using threads '.center(79, '-')) import processing.dummy as namespace else: print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]) raise SystemExit(2) test(namespace) uqfoundation-multiprocess-b3457a5/py3.8/examples/ex_webserver.py000066400000000000000000000041001455552142400250640ustar00rootroot00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `processing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. 
# import os import sys from multiprocess import Process, current_process as currentProcess, freeze_support as freezeSupport from http.server import HTTPServer from http.server import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocess.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (currentProcess()._name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print('Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)) print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']) os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.8/examples/ex_workers.py000066400000000000000000000042241455552142400245630ustar00rootroot00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). 
# import time import random from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport from multiprocess import Queue # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks list(map(task_queue.put, TASKS1)) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print('Unordered results:') for i in range(len(TASKS1)): print('\t', done_queue.get()) # Add more tasks using `put()` instead of `putMany()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print('\t', done_queue.get()) # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.8/index.html000066400000000000000000000117511455552142400222030ustar00rootroot00000000000000 Python processing

Python processing

Author: R Oudkerk
Contact: roudkerk at users.berlios.de
Url: http://developer.berlios.de/projects/pyprocessing
Version: 0.52
Licence: BSD Licence

processing is a package for the Python language which supports the spawning of processes using the API of the standard library's threading module. It runs on both Unix and Windows.

Features:

  • Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
  • Objects can be shared between processes using a server process or (for simple data) shared memory.
  • Equivalents of all the synchronization primitives in threading are available.
  • A Pool class makes it easy to submit tasks to a pool of worker processes.

Examples

The processing.Process class follows the API of threading.Thread. For example

from processing import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=[q])
    p.start()
    print q.get()
    p.join()

Synchronization primitives like locks, semaphores and conditions are available, for example

>>> from processing import Condition
>>> c = Condition()
>>> print c
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print c
<Condition(<RLock(MainProcess, 1)>), 0>

One can also use a manager to create shared objects either in shared memory or in a server process, for example

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print l
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print repr(l)
<Proxy[list] object at 0x00E1B3B0>

Tasks can be offloaded to a pool of worker processes in various ways, for example

>>> from processing import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.mapAsync(f, range(10))
>>> print result.get(timeout=1)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
BerliOS Developer Logo
uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/000077500000000000000000000000001455552142400227325ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/__init__.py000066400000000000000000000035001455552142400250410ustar00rootroot00000000000000# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Original: Copyright (c) 2006-2008, R Oudkerk # Original: Licensed to PSF under a Contributor Agreement. # Forked by Mike McKerns, to support enhanced serialization. # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) sys.path.append(root) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(root, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) del os, sys, root, get_license_text, get_readme_as_rst import sys from . import context # # Copy stuff from default context # __all__ = [x for x in dir(context._default_context) if not x.startswith('_')] globals().update((name, getattr(context._default_context, name)) for name in __all__) # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def license(): """print license""" print (__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/connection.py000066400000000000000000000761431455552142400254560ustar00rootroot00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] import io import os import sys import socket import struct import time import tempfile import itertools try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import util from . import AuthenticationError, BufferTooShort from .context import reduction _ForkingPickler = reduction.ForkingPickler try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE except ImportError: if sys.platform == 'win32': raise _winapi = None # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... CONNECTION_TIMEOUT = 20. 
_mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return getattr(time,'monotonic',time.time)() + timeout def _check_timeout(t): return getattr(time,'monotonic',time.time)() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter)), dir="") else: raise ValueError('unrecognized family') def _validate_family(family): ''' Checks if the family is valid for the current environment. ''' if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) if sys.platform == 'win32' and family == 'AF_UNIX': # double check if not hasattr(socket, family): raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str or util.is_abstract_socket_namespace(address): return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _ConnectionBase: _handle = None def __init__(self, handle, readable=True, writable=True): handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) # HACK for byte-indexing of non-bytewise buffers (e.g. 
array.array) if m.itemsize > 1: m = memoryview(bytes(m)) n = len(m) if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(_ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. """ self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[offset // itemsize : (offset + size) // itemsize]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return _ForkingPickler.loads(buf.getbuffer()) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. 
""" _got_empty_message = False def _close(self, _CloseHandle=_winapi.CloseHandle): _CloseHandle(self._handle) def _send_bytes(self, buf): ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nwritten, err = ov.GetOverlappedResult(True) assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile(self._handle, bsize, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). """ if _winapi: def _close(self, _close=_multiprocessing.closesocket): _close(self._handle) _write = _multiprocessing.send _read = _multiprocessing.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: n = write(self._handle, buf) remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: chunk = read(handle, remaining) n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf): n = len(buf) if n > 0x7fffffff: pre_header = struct.pack("!i", -1) header = struct.pack("!Q", n) self._send(pre_header) self._send(header) self._send(buf) else: # For wire compatibility with 3.7 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. 
self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if size == -1: buf = self._recv(8) size, = struct.unpack("!Q", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. ''' listener = self._listener if listener is not None: self._listener = None listener.close() @property def address(self): return self._listener._address @property def last_accepted(self): return self._listener._last_accepted def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = Connection(s1.detach()) c2 = Connection(s2.detach()) else: fd1, fd2 = os.pipe() c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = 
overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): # Linux abstract socket namespaces do not need to be explicitly unlinked self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() s.setblocking(True) return Connection(s.detach()) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) with socket.socket( getattr(socket, family) ) as s: s.setblocking(True) s.connect(address) return Connection(s.detach()) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: res = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = b'#CHALLENGE#' WELCOME = b'#WELCOME#' FAILURE = b'#FAILURE#' def deliver_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message, 'md5').digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message, 'md5').digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signalled. (Only # returning the first signalled might create starvation issues.) 
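# --- Illustrative sketch (not part of this module) --------------------------
# A hedged usage example of the Listener/Client pair defined above, including
# the HMAC challenge/response that passing an authkey triggers in accept() and
# Client().  The address, key, and helper names are made up for illustration;
# normally the Client() side lives in a different process.
import threading
from multiprocess.connection import Client, Listener

def _demo_listener_client():
    with Listener(('localhost', 0), authkey=b'secret') as listener:
        def client_side(address):
            with Client(address, authkey=b'secret') as conn:
                conn.send({'msg': 'hello'})
        t = threading.Thread(target=client_side, args=(listener.address,))
        t.start()
        with listener.accept() as conn:
            print(conn.recv())        # -> {'msg': 'hello'}
        t.join()
# --- end sketch --------------------------------------------------------------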
L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res+1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: ov, err = None, e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed... if ov and sys.getwindowsversion()[:2] >= (6, 2): # ... except on Windows 8 and later, where # the message HAS been consumed. try: _, err = ov.GetOverlappedResult(False) except OSError as e: err = e.winerror if not err and hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [o for o in object_list if o in ready_objects] else: import selectors # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). if hasattr(selectors, 'PollSelector'): _WaitSelector = selectors.PollSelector else: _WaitSelector = selectors.SelectSelector def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' with _WaitSelector() as selector: for obj in object_list: selector.register(obj, selectors.EVENT_READ) if timeout is not None: deadline = getattr(time,'monotonic',time.time)() + timeout while True: ready = selector.select(timeout) if ready: return [key.fileobj for (key, events) in ready] else: if timeout is not None: timeout = deadline - getattr(time,'monotonic',time.time)() if timeout < 0: return ready # # Make connection and socket objects sharable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . 
import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(sock.detach(), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): handle = dh.detach() return PipeConnection(handle, readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): fd = df.detach() return Connection(fd, readable, writable) reduction.register(Connection, reduce_connection) uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/context.py000066400000000000000000000260061455552142400247740ustar00rootroot00000000000000import os import sys import threading from . import process from . import reduction __all__ = () # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError current_process = staticmethod(process.current_process) parent_process = staticmethod(process.parent_process) active_children = staticmethod(process.active_children) def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, lock=True): '''Returns a synchronized shared object''' from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *, lock=True): '''Returns a synchronized shared array''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . 
import connection def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. ''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) from None ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property def reducer(self): '''Controls how objects will be reduced to a form that can be shared with other processes.''' return globals().get('reduction') @reducer.setter def reducer(self, reduction): globals()['reduction'] = reduction def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super().get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): if sys.platform == 'win32': return ['spawn'] else: methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] if reduction.HAVE_SEND_HANDLE: methods.append('forkserver') return methods # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): if not reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } if sys.platform == 'darwin': # bpo-33725: running 
arbitrary code after fork() is no longer reliable # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn else: _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/dummy/000077500000000000000000000000001455552142400240655ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/dummy/__init__.py000066400000000000000000000057651455552142400262130ustar00rootroot00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): if self._parent is not current_process(): raise RuntimeError( "Parent is {0!r} but current_process is {1!r}".format( self._parent, current_process())) self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def 
__init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/dummy/connection.py000066400000000000000000000030761455552142400266040ustar00rootroot00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe' ] from queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None @property def address(self): return self._backlog_queue def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False with self._in.not_empty: self._in.not_empty.wait(timeout) return self._in.qsize() > 0 def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/forkserver.py000066400000000000000000000303601455552142400254760ustar00rootroot00000000000000import errno import os import selectors import signal import socket import struct import sys import threading import warnings from . import connection from . import process from .context import reduction from . import resource_tracker from . import spawn from . 
import util __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def _stop(self): # Method used by unit tests to stop the server with self._lock: self._stop_unlocked() def _stop_unlocked(self): if self._forkserver_pid is None: return # close the "alive" file descriptor asks the server to stop os.close(self._forkserver_alive_fd) self._forkserver_alive_fd = None os.waitpid(self._forkserver_pid, 0) self._forkserver_pid = None if not util.is_abstract_socket_namespace(self._forkserver_address): os.unlink(self._forkserver_address) self._forkserver_address = None def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in self._preload_modules): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. ''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, resource_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. ''' with self._lock: resource_tracker.ensure_running() if self._forkserver_pid is not None: # forkserver was launched before, is it still running? pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) if not pid: # still alive return # dead, launch it again os.close(self._forkserver_alive_fd) self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None cmd = ('from multiprocess.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = {x: y for x, y in data.items() if x in desired_keys} else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) if not util.is_abstract_socket_namespace(address): os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. 
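# --- Illustrative sketch (not part of this module) --------------------------
# The "alive" pipe pattern described just above: every client process holds a
# copy of the write end; when the last writer closes it, a read on the other
# end returns b'' (EOF), which is how the server learns it may shut down.
# A standalone demonstration of that mechanism:
import os

_r, _w = os.pipe()
os.close(_w)                    # the last writer goes away
assert os.read(_r, 1) == b''    # EOF: no writers remain
os.close(_r)
# --- end sketch --------------------------------------------------------------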
alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] pid = util.spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w self._forkserver_pid = pid # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass util._close_stdin() sig_r, sig_w = os.pipe() os.set_blocking(sig_r, False) os.set_blocking(sig_w, False) def sigchld_handler(*_unused): # Dummy signal handler, doesn't do anything pass handlers = { # unblocking SIGCHLD allows the wakeup fd to notify our event loop signal.SIGCHLD: sigchld_handler, # protect the process from ^C signal.SIGINT: signal.SIG_IGN, } old_handlers = {sig: signal.signal(sig, val) for (sig, val) in handlers.items()} # calling os.write() in the Python signal handler is racy signal.set_wakeup_fd(sig_w) # map child pids to client fds pid_to_fd = {} with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) selector.register(sig_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: # EOF because no more client processes left assert os.read(alive_r, 1) == b'', "Not at EOF?" 
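# --- Illustrative sketch (not part of this module) --------------------------
# User code normally reaches this serving loop indirectly, by selecting the
# 'forkserver' start method (POSIX only, and only when handle passing is
# available).  A hedged usage example; the function names on the user side
# are invented for illustration.
import multiprocess as mp

def _square(x):
    return x * x

def _demo_forkserver():
    ctx = mp.get_context('forkserver')        # raises ValueError if unavailable
    ctx.set_forkserver_preload(['__main__'])  # optional: modules to preload
    with ctx.Pool(2) as pool:
        print(pool.map(_square, range(4)))    # -> [0, 1, 4, 9]
# --- end sketch --------------------------------------------------------------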
raise SystemExit if sig_r in rfds: # Got SIGCHLD os.read(sig_r, 65536) # exhaust while True: # Scan for child processes try: pid, sts = os.waitpid(-1, os.WNOHANG) except ChildProcessError: break if pid == 0: break child_w = pid_to_fd.pop(pid, None) if child_w is not None: if os.WIFSIGNALED(sts): returncode = -os.WTERMSIG(sts) else: if not os.WIFEXITED(sts): raise AssertionError( "Child {0:n} status is {1:n}".format( pid,sts)) returncode = os.WEXITSTATUS(sts) # Send exit code to client process try: write_signed(child_w, returncode) except BrokenPipeError: # client vanished pass os.close(child_w) else: # This shouldn't happen really warnings.warn('forkserver: waitpid returned ' 'unexpected pid %d' % pid) if listener in rfds: # Incoming fork request with listener.accept()[0] as s: # Receive fds from client fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) if len(fds) > MAXFDS_TO_SEND: raise RuntimeError( "Too many ({0:n}) fds to send".format( len(fds))) child_r, child_w, *fds = fds s.close() pid = os.fork() if pid == 0: # Child code = 1 try: listener.close() selector.close() unused_fds = [alive_r, child_w, sig_r, sig_w] unused_fds.extend(pid_to_fd.values()) code = _serve_one(child_r, fds, unused_fds, old_handlers) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) else: # Send pid to client process try: write_signed(child_w, pid) except BrokenPipeError: # client vanished pass pid_to_fd[pid] = child_w os.close(child_r) for fd in fds: os.close(fd) except OSError as e: if e.errno != errno.ECONNABORTED: raise def _serve_one(child_r, fds, unused_fds, handlers): # close unnecessary stuff and reset signal handlers signal.set_wakeup_fd(-1) for sig, val in handlers.items(): signal.signal(sig, val) for fd in unused_fds: os.close(fd) (_forkserver._forkserver_alive_fd, resource_tracker._resource_tracker._fd, *_forkserver._inherited_fds) = fds # Run process object received over pipe parent_sentinel = os.dup(child_r) code = spawn._main(child_r, parent_sentinel) return code # # Read and write signed numbers # def read_signed(fd): data = b'' length = SIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return SIGNED_STRUCT.unpack(data)[0] def write_signed(fd, n): msg = SIGNED_STRUCT.pack(n) while msg: nbytes = os.write(fd, msg) if nbytes == 0: raise RuntimeError('should not get here') msg = msg[nbytes:] # # # _forkserver = ForkServer() ensure_running = _forkserver.ensure_running get_inherited_fds = _forkserver.get_inherited_fds connect_to_new_process = _forkserver.connect_to_new_process set_forkserver_preload = _forkserver.set_forkserver_preload uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/heap.py000066400000000000000000000265521455552142400242330ustar00rootroot00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import bisect from collections import defaultdict import mmap import os import sys import tempfile import threading from .context import reduction, assert_spawning from . import util __all__ = ['BufferWrapper'] # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': import _winapi class Arena(object): """ A shared memory area backed by anonymous memory (Windows). 
""" _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if _winapi.GetLastError() == 0: break # We have reopened a preexisting mmap. buf.close() else: raise FileExistsError('Cannot find name for new mmap') self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state # Reopen existing mmap self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. See issue 23060 #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS else: class Arena(object): """ A shared memory area backed by a temporary file (POSIX). """ if sys.platform == 'linux': _dir_candidates = ['/dev/shm'] else: _dir_candidates = [] def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: # Arena is created anew (if fd != -1, it means we're coming # from rebuild_arena() below) self.fd, name = tempfile.mkstemp( prefix='pym-%d-'%os.getpid(), dir=self._choose_dir(size)) os.unlink(name) util.Finalize(self, os.close, (self.fd,)) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def _choose_dir(self, size): # Choose a non-storage backed directory if possible, # to improve performance for d in self._dir_candidates: st = os.statvfs(d) if st.f_bavail * st.f_frsize >= size: # enough free space? return d return util.get_temp_dir() def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because ' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): # Minimum malloc() alignment _alignment = 8 _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() # Current arena allocation size self._size = size # A sorted list of available block sizes in arenas self._lengths = [] # Free block management: # - map each block size to a list of `(Arena, start, stop)` blocks self._len_to_seq = {} # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block # starting at that offset self._start_to_block = {} # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block # ending at that offset self._stop_to_block = {} # Map arenas to their `(Arena, start, stop)` blocks in use self._allocated_blocks = defaultdict(set) self._arenas = [] # List of pending blocks to free - see comment in free() below self._pending_free_blocks = [] # Statistics self._n_mallocs = 0 self._n_frees = 0 @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _new_arena(self, size): # Create a new arena with at least the given *size* length = self._roundup(max(self._size, size), mmap.PAGESIZE) # We carve larger and larger arenas, for efficiency, until we # reach a large-ish size (roughly L3 cache-sized) if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) def 
_discard_arena(self, arena): # Possibly delete the given (unused) arena length = arena.size # Reusing an existing arena is faster than creating a new one, so # we only reclaim space if it's large enough. if length < self._DISCARD_FREE_SPACE_LARGER_THAN: return blocks = self._allocated_blocks.pop(arena) assert not blocks del self._start_to_block[(arena, 0)] del self._stop_to_block[(arena, length)] self._arenas.remove(arena) seq = self._len_to_seq[length] seq.remove((arena, 0, length)) if not seq: del self._len_to_seq[length] self._lengths.remove(length) def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): return self._new_arena(size) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _add_free_block(self, block): # make block available and try to merge with its neighbours in the arena (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _remove_allocated_block(self, block): arena, start, stop = block blocks = self._allocated_blocks[arena] blocks.remove((start, stop)) if not blocks: # Arena is entirely free, discard it from this process self._discard_arena(arena) def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held. while True: try: block = self._pending_free_blocks.pop() except IndexError: break self._add_free_block(block) self._remove_allocated_block(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
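# --- Illustrative sketch (not part of this module) --------------------------
# The heap rounds every request up to a power-of-two alignment using the mask
# trick from _roundup() above: (n + mask) & ~mask with mask = alignment - 1.
# A standalone check of that arithmetic (the helper name is invented):
def _roundup_demo(n, alignment):
    # alignment must be a power of 2
    mask = alignment - 1
    return (n + mask) & ~mask

assert _roundup_demo(1, 8) == 8
assert _roundup_demo(8, 8) == 8
assert _roundup_demo(9, 8) == 16
# --- end sketch --------------------------------------------------------------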
if os.getpid() != self._lastpid: raise ValueError( "My pid ({0:n}) is not last pid {1:n}".format( os.getpid(),self._lastpid)) if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._n_frees += 1 self._free_pending_blocks() self._add_free_block(block) self._remove_allocated_block(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._n_mallocs += 1 # allow pending blocks to be marked available self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) real_stop = start + size if real_stop < stop: # if the returned block is larger than necessary, mark # the remainder available self._add_free_block((arena, real_stop, stop)) self._allocated_blocks[arena].add((start, real_stop)) return (arena, start, real_stop) # # Class wrapping a block allocated out of a Heap -- can be inherited by child process # class BufferWrapper(object): _heap = Heap() def __init__(self, size): if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start+size] uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/managers.py000066400000000000000000001375211455552142400251120ustar00rootroot00000000000000# # Module providing manager classes for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token', 'SharedMemoryManager' ] # # Imports # import sys import threading import signal import array import queue import time import os from os import getpid from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen, ProcessError from . import pool from . import process from . import util from . import get_context try: from . 
import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] if view_types[0] is not list: # only needed in Py3.0 def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) # # Type for identifying shared objects # class Token(object): ''' Type to uniquely identify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): if not isinstance(result, str): raise TypeError( "Result {0!r} (kind '{1}') type is {2}, not str".format( result, kind, type(result))) if kind == '#UNSERIALIZABLE': return RemoteError('Unserializable message: %s\n' % result) else: return RemoteError(result) else: return ValueError('Unrecognized message type {!r}'.format(kind)) class RemoteError(Exception): def __str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): if not isinstance(authkey, bytes): raise TypeError( "Authkey {0!r} is type {1!s}, not bytes".format( authkey, type(authkey))) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if sys.stdout != sys.__stdout__: # what about stderr? 
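# --- Illustrative sketch (not part of this module) --------------------------
# public_methods() above is what the server uses to decide which methods of a
# registered object get exposed through proxies when no explicit 'exposed'
# list is given: every callable attribute whose name does not start with '_'.
# A small standalone check (the _Queueish class is invented for illustration):
from multiprocess.managers import public_methods

class _Queueish:
    def put(self, item): ...
    def get(self): ...
    def _internal(self): ...

assert sorted(public_methods(_Queueish())) == ['get', 'put']
# --- end sketch --------------------------------------------------------------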
util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def handle_request(self, c): ''' Handle a new connection ''' funcname = result = request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) c.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = \ self.id_to_local_proxy_obj[ident] except KeyError as second_ke: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception as e: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' # Perhaps include debug info about 'c'? 
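# --- Illustrative sketch (not part of this module) --------------------------
# The client side talks to this Server through the dispatch() helper defined
# earlier: it sends a 4-tuple (ident, methodname, args, kwds) and expects a
# (kind, result) reply, where kind is '#RETURN', '#ERROR', '#TRACEBACK',
# '#PROXY' or '#UNSERIALIZABLE'.  A hedged, in-memory imitation of that
# request/reply shape (FakeConnection is invented for illustration):
from multiprocess.managers import dispatch

class FakeConnection:
    def __init__(self, reply):
        self._reply = reply
        self.sent = None
    def send(self, msg):
        self.sent = msg
    def recv(self):
        return self._reply

_conn = FakeConnection(('#RETURN', 42))
assert dispatch(_conn, None, 'dummy') == 42
assert _conn.sent == (None, 'dummy', (), {})
# --- end sketch --------------------------------------------------------------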
with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' return len(self.id_to_refcount) def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(*args, **kwds): ''' Create a new shared object and return its id ''' if len(args) >= 3: self, c, typeid, *args = args elif not args: raise TypeError("descriptor 'create' of 'Server' object " "needs an argument") else: if 'typeid' not in kwds: raise TypeError('create expected at least 2 positional ' 'arguments, got %d' % (len(args)-1)) typeid = kwds.pop('typeid') if len(args) >= 2: self, c, *args = args import warnings warnings.warn("Passing 'typeid' as keyword argument is deprecated", DeprecationWarning, stacklevel=2) else: if 'c' not in kwds: raise TypeError('create expected at least 2 positional ' 'arguments, got %d' % (len(args)-1)) c = kwds.pop('c') self, *args = args import warnings warnings.warn("Passing 'c' as keyword argument is deprecated", DeprecationWarning, stacklevel=2) args = tuple(args) with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: if kwds or (len(args) != 1): raise ValueError( "Without callable, must have one non-keyword argument") obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: if not isinstance(method_to_typeid, dict): raise TypeError( "Method_to_typeid {0!r}: type {1!s}, not dict".format( method_to_typeid, type(method_to_typeid))) exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) create.__text_signature__ = '($self, c, typeid, /, *args, **kwds)' def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: # If no external references exist but an internal (to the # manager) still does and a new external reference is created # from it, restore the manager's tracking of it from the # previously stashed internal ref. 
if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = \ self.id_to_local_proxy_obj[ident] obj, exposed, gettypeid = self.id_to_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if ident not in self.id_to_refcount and \ ident in self.id_to_local_proxy_obj: util.debug('Server DECREF skipping %r', ident) return with self.mutex: if self.id_to_refcount[ident] <= 0: raise AssertionError( "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( ident, self.id_to_obj[ident], self.id_to_refcount[ident])) self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: # Two-step process in case the object turns out to contain other # proxy objects (e.g. a managed list of managed lists). # Otherwise, deleting self.id_to_obj[ident] would trigger the # deleting of the stored value (another managed object) which would # in turn attempt to acquire the mutex that is already held here. self.id_to_obj[ident] = (None, (), None) # thread-safe util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { #XXX: register dill? 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, 
self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' # bpo-36368: protect server process from KeyboardInterrupt signals signal.signal(signal.SIGINT, signal.SIG_IGN) if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, /, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() if self._state.value != State.STARTED: if self._state.value == State.INITIAL: raise ProcessError("Unable to start server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=0.1) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass @property def address(self): return self._address @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: 
cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): # isinstance? assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, /, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] # Should be set to True only when a proxy object is being created # on the manager server; primary use case: nested proxy objects. # RebuildProxy detects when a proxy is being created on the manager # and sets this value appropriately. 
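# [Editor's sketch, not part of the original source] How register() is used in
# practice; `Counter` and `MyManager` are hypothetical names:
#
#     from multiprocess.managers import BaseManager
#
#     class Counter:
#         def __init__(self): self._n = 0
#         def bump(self): self._n += 1; return self._n
#
#     class MyManager(BaseManager): pass
#     MyManager.register('Counter', Counter)   # adds a MyManager.Counter() method
#
#     if __name__ == '__main__':
#         with MyManager() as m:
#             c = m.Counter()                   # returns an AutoProxy for Counter
#             print(c.bump(), c.bump())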
self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... 
decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = \ server.id_to_obj[token.id] incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, /, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def get(self): return 
self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): _exposed_ = ('__next__', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self, n=1): return self._callmethod('notify', (n,)) def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', 
'__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) DictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__' )) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocess.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) # # Definition of SharedMemoryManager and SharedMemoryServer # if HAS_SHMEM: class _SharedMemoryTracker: "Manages one or more shared memory segments." def __init__(self, name, segment_names=[]): self.shared_memory_context_name = name self.segment_names = segment_names def register_segment(self, segment_name): "Adds the supplied shared memory block name to tracker." util.debug(f"Register segment {segment_name!r} in pid {getpid()}") self.segment_names.append(segment_name) def destroy_segment(self, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the list of blocks being tracked.""" util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") self.segment_names.remove(segment_name) segment = shared_memory.SharedMemory(segment_name) segment.close() segment.unlink() def unlink(self): "Calls destroy_segment() on all tracked shared memory blocks." 
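# [Editor's sketch, not part of the original source] The tracker above exists
# so that SharedMemoryManager (defined below) can unlink every segment it
# handed out when the manager shuts down, e.g.:
#
#     from multiprocess.managers import SharedMemoryManager
#
#     if __name__ == '__main__':
#         with SharedMemoryManager() as smm:
#             shm = smm.SharedMemory(size=128)      # tracked by the server
#             sl = smm.ShareableList(range(5))      # also tracked
#             sl[0] = 42
#         # exiting the block shuts the server down and unlink()s both segments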
            for segment_name in self.segment_names[:]:
                self.destroy_segment(segment_name)

        def __del__(self):
            util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
            self.unlink()

        def __getstate__(self):
            return (self.shared_memory_context_name, self.segment_names)

        def __setstate__(self, state):
            self.__init__(*state)


    class SharedMemoryServer(Server):

        public = Server.public + \
                 ['track_segment', 'release_segment', 'list_segments']

        def __init__(self, *args, **kwargs):
            Server.__init__(self, *args, **kwargs)
            address = self.address
            # The address of Linux abstract namespaces can be bytes
            if isinstance(address, bytes):
                address = os.fsdecode(address)
            self.shared_memory_context = \
                _SharedMemoryTracker(f"shm_{address}_{getpid()}")
            util.debug(f"SharedMemoryServer started by pid {getpid()}")

        def create(*args, **kwargs):
            """Create a new distributed-shared object (not backed by a shared
            memory block) and return its id to be used in a Proxy Object."""
            # Unless set up as a shared proxy, don't make shared_memory_context
            # a standard part of kwargs.  This makes things easier for supplying
            # simple functions.
            if len(args) >= 3:
                typeid = args[2]
            elif 'typeid' in kwargs:
                typeid = kwargs['typeid']
            elif not args:
                raise TypeError("descriptor 'create' of 'SharedMemoryServer' "
                                "object needs an argument")
            else:
                raise TypeError('create expected at least 2 positional '
                                'arguments, got %d' % (len(args)-1))
            if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
                kwargs['shared_memory_context'] = self.shared_memory_context
            return Server.create(*args, **kwargs)
        create.__text_signature__ = '($self, c, typeid, /, *args, **kwargs)'

        def shutdown(self, c):
            "Call unlink() on all tracked shared memory, terminate the Server."
            self.shared_memory_context.unlink()
            return Server.shutdown(self, c)

        def track_segment(self, c, segment_name):
            "Adds the supplied shared memory block name to Server's tracker."
            self.shared_memory_context.register_segment(segment_name)

        def release_segment(self, c, segment_name):
            """Calls unlink() on the shared memory block with the supplied name
            and removes it from the tracker instance inside the Server."""
            self.shared_memory_context.destroy_segment(segment_name)

        def list_segments(self, c):
            """Returns a list of names of shared memory blocks that the Server
            is currently tracking."""
            return self.shared_memory_context.segment_names


    class SharedMemoryManager(BaseManager):
        """Like SyncManager but uses SharedMemoryServer instead of Server.

        It provides methods for creating and returning SharedMemory instances
        and for creating a list-like object (ShareableList) backed by shared
        memory.  It also provides methods that create and return Proxy Objects
        that support synchronization across processes (i.e. multi-process-safe
        locks and semaphores).
        """

        _Server = SharedMemoryServer

        def __init__(self, *args, **kwargs):
            if os.name == "posix":
                # bpo-36867: Ensure the resource_tracker is running before
                # launching the manager process, so that concurrent
                # shared_memory manipulation both in the manager and in the
                # current process does not create two resource_tracker
                # processes.
                from .
import resource_tracker resource_tracker.ensure_running() BaseManager.__init__(self, *args, **kwargs) util.debug(f"{self.__class__.__name__} created by pid {getpid()}") def __del__(self): util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") pass def get_server(self): 'Better than monkeypatching for now; merge into Server ultimately' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started SharedMemoryServer") elif self._state.value == State.SHUTDOWN: raise ProcessError("SharedMemoryManager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self._Server(self._registry, self._address, self._authkey, self._serializer) def SharedMemory(self, size): """Returns a new SharedMemory instance with the specified size in bytes, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sms = shared_memory.SharedMemory(None, create=True, size=size) try: dispatch(conn, None, 'track_segment', (sms.name,)) except BaseException as e: sms.unlink() raise e return sms def ShareableList(self, sequence): """Returns a new ShareableList instance populated with the values from the input sequence, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sl = shared_memory.ShareableList(sequence) try: dispatch(conn, None, 'track_segment', (sl.shm.name,)) except BaseException as e: sl.shm.unlink() raise e return sl uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/pool.py000066400000000000000000000773751455552142400243000ustar00rootroot00000000000000# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import collections import itertools import os import queue import threading import time import traceback import warnings from queue import Empty # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError from .connection import wait # # Constants representing the state of a pool # INIT = "INIT" RUN = "RUN" CLOSE = "CLOSE" TERMINATE = "TERMINATE" # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): if (maxtasks is not None) and not (isinstance(maxtasks, int) and maxtasks >= 1): raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception and func is not _helper_reraises_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 util.debug('worker exiting after %d tasks' % completed) def _helper_reraises_exception(ex): 'Pickle-able helper function for use by _guarded_task_generation.' raise ex # # Class representing a process pool # class _PoolCache(dict): """ Class that implements a cache for the Pool class that will notify the pool management threads every time the cache is emptied. The notification is done by the use of a queue that is provided when instantiating the cache. """ def __init__(self, /, *args, notifier=None, **kwds): self.notifier = notifier super().__init__(*args, **kwds) def __delitem__(self, item): super().__delitem__(item) # Notify that the cache is empty. This is important because the # pool keeps maintaining workers until the cache gets drained. This # eliminates a race condition in which a task is finished after the # the pool's _handle_workers method has enter another iteration of the # loop. In this situation, the only event that can wake up the pool # is the cache to be emptied (no more tasks available). if not self: self.notifier.put(None) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' _wrap_exception = True @staticmethod def Process(ctx, *args, **kwds): return ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): # Attributes initialized early to make sure that they exist in # __del__() if __init__() raises an exception self._pool = [] self._state = INIT self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.SimpleQueue() # The _change_notifier queue exist to wake up self._handle_workers() # when the cache (self._cache) is empty or when there is a change in # the _state variable of the thread that runs _handle_workers. 
self._change_notifier = self._ctx.SimpleQueue() self._cache = _PoolCache(notifier=self._change_notifier) self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes try: self._repopulate_pool() except Exception: for p in self._pool: if p.exitcode is None: p.terminate() for p in self._pool: p.join() raise sentinels = self._get_sentinels() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self._cache, self._taskqueue, self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception, sentinels, self._change_notifier) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._change_notifier, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) self._state = RUN # Copy globals as function locals to make sure that they are available # during Python shutdown when the Pool is destroyed. def __del__(self, _warn=warnings.warn, RUN=RUN): if self._state == RUN: _warn(f"unclosed running multiprocessing pool {self!r}", ResourceWarning, source=self) if getattr(self, '_change_notifier', None) is not None: self._change_notifier.put(None) def __repr__(self): cls = self.__class__ return (f'<{cls.__module__}.{cls.__qualname__} ' f'state={self._state} ' f'pool_size={len(self._pool)}>') def _get_sentinels(self): task_queue_sentinels = [self._outqueue._reader] self_notifier_sentinels = [self._change_notifier._reader] return [*task_queue_sentinels, *self_notifier_sentinels] @staticmethod def _get_worker_sentinels(workers): return [worker.sentinel for worker in workers if hasattr(worker, "sentinel")] @staticmethod def _join_exited_workers(pool): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. """ cleaned = False for i in reversed(range(len(pool))): worker = pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del pool[i] return cleaned def _repopulate_pool(self): return self._repopulate_pool_static(self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) @staticmethod def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. 
""" for i in range(processes - len(pool)): w = Process(ctx, target=worker, args=(inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() pool.append(w) util.debug('added worker') @staticmethod def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Clean up any exited workers and start replacements for them. """ if Pool._join_exited_workers(pool): Pool._repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _check_running(self): if self._state != RUN: raise ValueError("Pool not running") def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. Pool must be running. ''' return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def _guarded_task_generation(self, result_job, func, iterable): '''Provides a generator of tasks for imap and imap_unordered with appropriate handling for iterables which throw exceptions during iteration.''' try: i = -1 for i, x in enumerate(iterable): yield (result_job, i, func, (x,), {}) except Exception as e: yield (result_job, i+1, _helper_reraises_exception, (e,), {}) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. ''' self._check_running() if chunksize == 1: result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0:n}".format( chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. 
''' self._check_running() if chunksize == 1: result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0!r}".format(chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' self._check_running() result = ApplyResult(self, callback, error_callback) self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' self._check_running() if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapper, task_batches), None ) ) return result @staticmethod def _wait_for_updates(sentinels, change_notifier, timeout=None): wait(sentinels, timeout=timeout) while not change_notifier.empty(): change_notifier.get() @classmethod def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception, sentinels, change_notifier): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
while thread._state == RUN or (cache and thread._state != TERMINATE): cls._maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] cls._wait_for_updates(current_sentinels, change_notifier) # send sentinel to stop workers taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None try: # iterating taskseq cannot fail for task in taskseq: if thread._state != RUN: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, idx = task[:2] try: cache[job]._set(idx, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') idx = task[1] if task else -1 set_length(idx + 1) continue break finally: task = taskseq = job = None else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state != RUN: assert thread._state == TERMINATE, "Thread not in TERMINATE" util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE self._change_notifier.put(None) def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') if self._state == RUN: raise ValueError("Pool is still running") elif self._state not in (CLOSE, TERMINATE): raise ValueError("In unknown state") self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') # Notify that the worker_handler state has been changed so the # _handle_workers loop can be unblocked (and exited) in order to # send the finalization sentinel all the workers. worker_handler._state = TERMINATE change_notifier.put(None) task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) if (not result_handler.is_alive()) and (len(cache) != 0): raise AssertionError( "Cannot have cache with result_hander not alive") result_handler._state = TERMINATE change_notifier.put(None) outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): self._check_running() return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, pool, callback, error_callback): self._pool = pool self._event = threading.Event() self._job = next(job_counter) self._cache = pool._cache self._callback = callback self._error_callback = error_callback self._cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): if not self.ready(): raise ValueError("{0!r} not ready".format(self)) return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] self._pool = None AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, pool, chunksize, length, callback, error_callback): ApplyResult.__init__(self, pool, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del self._cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): self._number_left -= 1 success, result = success_result if success and self._success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() self._pool = None else: if not success and self._success: # only store first exception self._success = False self._value = result if self._number_left == 0: # only consider the result ready once all jobs are done if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() self._pool = None # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, pool): self._pool = pool self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = pool._cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} self._cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None raise TimeoutError from None 
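# [Editor's sketch, not part of the original source] How ApplyResult and the
# IMap iterators surface through the Pool API; `slow` is a hypothetical
# function defined at module level so it can be pickled:
#
#     import time
#     import multiprocess as mp
#
#     def slow(x):
#         time.sleep(0.1)
#         return x * x
#
#     if __name__ == '__main__':
#         with mp.Pool(4) as pool:
#             res = pool.apply_async(slow, (3,))    # ApplyResult
#             print(res.get(timeout=5))
#             it = pool.imap(slow, range(10))       # IMapIterator
#             print(it.next(timeout=5))             # per-item timeout
#             print(list(it))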
success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] self._pool = None def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] self._pool = None # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] self._pool = None # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(ctx, *args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.SimpleQueue() self._outqueue = queue.SimpleQueue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _get_sentinels(self): return [self._change_notifier._reader] @staticmethod def _get_worker_sentinels(workers): return [] @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # drain inqueue, and put sentinels at its head to make workers finish try: while True: inqueue.get(block=False) except queue.Empty: pass for i in range(size): inqueue.put(None) def _wait_for_updates(self, sentinels, change_notifier, timeout): time.sleep(timeout) uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/popen_fork.py000066400000000000000000000050051455552142400254460ustar00rootroot00000000000000import os import signal from . import util __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' def __init__(self, process_obj): util._flush_std_streams() self.returncode = None self.finalizer = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except OSError as e: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) else: assert os.WIFEXITED(sts), "Status is {:n}".format(sts) self.returncode = os.WEXITSTATUS(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from multiprocess.connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
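# [Editor's sketch, not part of the original source] This Popen class backs the
# 'fork' start method on POSIX; it is normally reached indirectly, e.g.:
#
#     import multiprocess as mp
#
#     if __name__ == '__main__':
#         ctx = mp.get_context('fork')              # POSIX-only start method
#         p = ctx.Process(target=print, args=('started via fork',))
#         p.start()
#         print(p.sentinel, p.is_alive())
#         p.join()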
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def _send_signal(self, sig): if self.returncode is None: try: os.kill(self.pid, sig) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def terminate(self): self._send_signal(signal.SIGTERM) def kill(self): self._send_signal(signal.SIGKILL) def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) os.close(parent_w) code = process_obj._bootstrap(parent_sentinel=child_r) finally: os._exit(code) else: os.close(child_w) os.close(child_r) self.finalizer = util.Finalize(self, util.close_fds, (parent_r, parent_w,)) self.sentinel = parent_r def close(self): if self.finalizer is not None: self.finalizer() uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/popen_forkserver.py000066400000000000000000000042631455552142400267020ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen if not reduction.HAVE_SEND_HANDLE: raise ImportError('No support for sending fds between processes') from . import forkserver from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) # Keep a duplicate of the data pipe's write end as a sentinel of the # parent process used by the child process. _parent_w = os.dup(w) self.finalizer = util.Finalize(self, util.close_fds, (_parent_w, self.sentinel)) with open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_signed(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from multiprocess.connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_signed(self.sentinel) except (OSError, EOFError): # This should not happen usually, but perhaps the forkserver # process itself got killed self.returncode = 255 return self.returncode uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/popen_spawn_posix.py000066400000000000000000000037551455552142400270710ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): from . 
import resource_tracker tracker_fd = resource_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds) self.sentinel = parent_r with open(parent_w, 'wb', closefd=False) as f: f.write(fp.getbuffer()) finally: fds_to_close = [] for fd in (parent_r, parent_w): if fd is not None: fds_to_close.append(fd) self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) for fd in (child_r, child_w): if fd is not None: os.close(fd) uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/popen_spawn_win32.py000066400000000000000000000076531455552142400266720ustar00rootroot00000000000000import os import msvcrt import signal import sys import _winapi from .context import reduction, get_spawning_popen, set_spawning_popen from . import spawn from . import util __all__ = ['Popen'] # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") def _path_eq(p1, p2): return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) WINENV = not _path_eq(sys.executable, sys._base_executable) def _close_handles(*handles): for handle in handles: _winapi.CloseHandle(handle) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' def __init__(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be duplicated by the child process # -- see spawn_main() in spawn.py. # # bpo-33929: Previously, the read end of pipe was "stolen" by the child # process, but it leaked a handle if the child process had been # terminated before it could steal the handle from the parent process. rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) cmd = ' '.join('"%s"' % x for x in cmd) python_exe = spawn.get_executable() # bpo-35797: When running in a venv, we bypass the redirect # executor and launch our base Python. 
if WINENV and _path_eq(python_exe, sys.executable): python_exe = sys._base_executable env = os.environ.copy() env["__PYVENV_LAUNCHER__"] = sys.executable else: env = None with open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = _winapi.CreateProcess( python_exe, cmd, None, None, False, 0, env, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) self.finalizer = util.Finalize(self, _close_handles, (self.sentinel, int(rhandle))) # send information to child set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: set_spawning_popen(None) def duplicate_for_child(self, handle): assert self is get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except OSError: if self.wait(timeout=1.0) is None: raise kill = terminate def close(self): self.finalizer() uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/process.py000066400000000000000000000273371455552142400247760ustar00rootroot00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
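# [Editor's sketch, not part of the original header] Minimal use of the Process
# class this module provides; `greet` is a hypothetical function:
#
#     import multiprocess as mp
#
#     def greet(name):
#         print('hello', name)
#
#     if __name__ == '__main__':
#         p = mp.Process(target=greet, args=('world',))
#         p.start()
#         p.join()
#         print(p.name, p.pid, p.exitcode)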
# __all__ = ['BaseProcess', 'current_process', 'active_children', 'parent_process'] # # Imports # import os import sys import signal import itertools import threading from _weakrefset import WeakSet # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_children) def parent_process(): ''' Return process object representing the parent process ''' return _parent_process # # # def _cleanup(): # check for processes which have finished for p in list(_children): if p._popen.poll() is not None: _children.discard(p) # # The `Process` class # class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analogous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count,) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._parent_name = _current_process.name self._popen = None self._closed = False self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) if daemon is not None: self.daemon = daemon _dangling.add(self) def _check_closed(self): if self._closed: raise ValueError("process object is closed") def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' self._check_closed() assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._check_closed() self._popen.terminate() def kill(self): ''' Terminate process; sends SIGKILL signal or uses TerminateProcess() ''' self._check_closed() self._popen.kill() def join(self, timeout=None): ''' Wait until child process terminates ''' self._check_closed() assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) def is_alive(self): ''' Return whether process is alive ''' self._check_closed() if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _children.discard(self) return False def close(self): ''' Close the Process object. This method releases resources held by the Process object. 
It is an error to call this method if the child process is still running. ''' if self._popen is not None: if self._popen.poll() is None: raise ValueError("Cannot close a process while it is still running. " "You should first call join() or terminate().") self._popen.close() self._popen = None del self._sentinel _children.discard(self) self._closed = True @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, str), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' self._check_closed() if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' self._check_closed() if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None def __repr__(self): exitcode = None if self is _current_process: status = 'started' elif self._closed: status = 'closed' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: exitcode = self._popen.poll() if exitcode is not None: status = 'stopped' else: status = 'started' info = [type(self).__name__, 'name=%r' % self._name] if self._popen is not None: info.append('pid=%s' % self._popen.pid) info.append('parent=%s' % self._parent_pid) info.append(status) if exitcode is not None: exitcode = _exitcode_to_name.get(exitcode, exitcode) info.append('exitcode=%s' % exitcode) if self.daemon: info.append('daemon') return '<%s>' % ' '.join(info) ## def _bootstrap(self, parent_sentinel=None): from . 
import util, context global _current_process, _parent_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() util._close_stdin() old_process = _current_process _current_process = self _parent_process = _ParentProcess( self._parent_name, self._parent_pid, parent_sentinel) if threading._HAVE_THREAD_NATIVE_ID: threading.main_thread()._set_native_id() try: util._finalizer_registry.clear() util._run_after_forkers() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as e: if not e.args: exitcode = 1 elif isinstance(e.args[0], int): exitcode = e.args[0] else: sys.stderr.write(str(e.args[0]) + '\n') exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: threading._shutdown() util.info('process exiting with exitcode %d' % exitcode) util._flush_std_streams() return exitcode # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the parent process # class _ParentProcess(BaseProcess): def __init__(self, name, pid, sentinel): self._identity = () self._name = name self._pid = pid self._parent_pid = None self._popen = None self._closed = False self._sentinel = sentinel self._config = {} def is_alive(self): from multiprocess.connection import wait return not wait([self._sentinel], timeout=0) @property def ident(self): return self._pid def join(self, timeout=None): ''' Wait until parent process terminates ''' from multiprocess.connection import wait wait([self._sentinel], timeout=timeout) pid = ident # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._closed = False self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} # Note that some versions of FreeBSD only allow named # semaphores to have names of up to 14 characters. Therefore # we choose a short prefix. # # On MacOSX in a sandbox it may be necessary to use a # different prefix -- see #19478. # # Everything in self._config will be inherited by descendant # processes. def close(self): pass _parent_process = None _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in list(signal.__dict__.items()): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = f'-{name}' # For debug and leak testing _dangling = WeakSet() uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/queues.py000066400000000000000000000270361455552142400246230ustar00rootroot00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
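# A minimal illustrative sketch of the Process API built on the BaseProcess
# class defined in process.py above. It assumes only that `multiprocess` is
# importable; the worker function, its arguments and the queue used to return
# the result are hypothetical.
def _square(x, out):
    # hypothetical worker: push x*x back to the parent via a queue
    out.put(x * x)

if __name__ == '__main__':
    import multiprocess as mp

    q = mp.Queue()
    p = mp.Process(target=_square, args=(7, q), name='worker-1')
    p.start()                             # BaseProcess.start() -> self._Popen(self)
    p.join()                              # waits on the popen sentinel
    print(p.name, p.exitcode, q.get())    # worker-1 0 49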
# __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import weakref import errno from queue import Empty, Full try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import connection from . import context _ForkingPickler = context.reduction.ForkingPickler from .util import debug, info, Finalize, register_after_fork, is_exiting # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0, *, ctx): if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._after_fork() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._after_fork() def _after_fork(self): debug('Queue._after_fork()') self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = getattr(time,'monotonic',time.time)() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - getattr(time,'monotonic',time.time)() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True try: self._reader.close() finally: close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed, "Queue {0!r} not closed".format(self) if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread 
which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._writer.close, self._ignore_epipe, self._on_queue_feeder_error, self._sem), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe, onerror, queue_sem): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') close() return # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as e: if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: # Since the object has not been sent in the queue, we need # to decrease the size of the queue. The error acts as # if the object had been silently removed from the queue # and this step is necessary to have a properly working # queue. queue_sem.release() onerror(e, obj) @staticmethod def _on_queue_feeder_error(e, obj): """ Private API hook called when feeding data in the background thread raises an exception. For overriding by concurrent.futures. """ import traceback traceback.print_exc() _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. 
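# A minimal illustrative sketch of the task_done()/join() protocol described
# in the note above, using the JoinableQueue defined below. It assumes only
# that `multiprocess` is importable; the consumer function and the items fed
# to it are hypothetical.
def _consume(jq):
    while True:
        item = jq.get()
        try:
            pass                # hypothetical processing of `item`
        finally:
            jq.task_done()      # exactly one task_done() per completed get()

if __name__ == '__main__':
    import multiprocess as mp

    jq = mp.JoinableQueue()
    w = mp.Process(target=_consume, args=(jq,), daemon=True)
    w.start()
    for item in range(3):
        jq.put(item)
    jq.join()                   # returns once every put() has a matching task_done()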
# class JoinableQueue(Queue): def __init__(self, maxsize=0, *, ctx): Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty, self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self, *, ctx): self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._poll = self._reader.poll def get(self): with self._rlock: res = self._reader.recv_bytes() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def put(self, obj): # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(obj) else: with self._wlock: self._writer.send_bytes(obj) uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/reduction.py000066400000000000000000000226451455552142400253110ustar00rootroot00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from abc import ABCMeta import copyreg import functools import io import os try: import dill as pickle except ImportError: import pickle import socket import sys from . 
import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocess.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None, *args, **kwds): buf = io.BytesIO() cls(buf, protocol, *args, **kwds).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None, *args, **kwds): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol, *args, **kwds).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False, *, source_process=None): '''Duplicate a handle. (target_process is a handle not a pid!)''' current_process = _winapi.GetCurrentProcess() if source_process is None: source_process = current_process if target_process is None: target_process = current_process return _winapi.DuplicateHandle( source_process, handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) if len(a) % 256 != msg[0]: raise AssertionError( "Len is {0:n} but msg[0] is {1!r}".format( len(a), msg[0])) return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) class AbstractReducer(metaclass=ABCMeta): '''Abstract base class for use in implementing a Reduction class suitable for use in replacing the standard reduction mechanism used in multiprocess.''' ForkingPickler = ForkingPickler register = register dump = dump send_handle = send_handle recv_handle = recv_handle if sys.platform == 'win32': steal_handle = steal_handle duplicate = duplicate DupHandle = DupHandle else: sendfds = sendfds recvfds = recvfds DupFd = DupFd _reduce_method = _reduce_method _reduce_method_descriptor = _reduce_method_descriptor _rebuild_partial = _rebuild_partial _reduce_socket = _reduce_socket _rebuild_socket = _rebuild_socket def __init__(self, *args): register(type(_C().f), _reduce_method) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) register(functools.partial, _reduce_partial) register(socket.socket, _reduce_socket) uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/resource_sharer.py000066400000000000000000000123501455552142400265000ustar00rootroot00000000000000# # We use a background thread for sharing fds on Unix, and for sharing sockets on # Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. # import os import signal import socket import sys import threading from . import process from .context import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. 
This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resources using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._old_locks = [] self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() # If self._lock was locked at the time of the fork, it may be broken # -- see issue 6721. Replace it without letting it be gc'ed. self._old_locks.append(self._lock) self._lock = threading.Lock() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None, "Already have Listener" util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/resource_tracker.py000066400000000000000000000207701455552142400266540ustar00rootroot00000000000000############################################################################### # Server process to keep track of unlinked resources (like shared memory # segments, semaphores etc.) and clean them. 
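# A minimal illustrative round-trip sketch of the ForkingPickler defined in
# reduction.py above. When dill is installed, ForkingPickler serializes
# objects that plain pickle rejects (lambdas, closures, interactively defined
# functions); without dill the lambda below would fail to pickle. The payload
# here is hypothetical.
if __name__ == '__main__':
    from multiprocess.reduction import ForkingPickler

    payload = {'fn': lambda x: x + 1, 'data': [1, 2, 3]}
    blob = ForkingPickler.dumps(payload)        # returns a memoryview
    restored = ForkingPickler.loads(bytes(blob))
    print(restored['fn'](41))                   # 42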
# # On Unix we run a server process which keeps track of unlinked # resources. The server ignores SIGINT and SIGTERM and reads from a # pipe. Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining resource names. # # This is important because there may be system limits for such resources: for # instance, the system only supports a limited number of named semaphores, and # shared-memory segments live in the RAM. If a python process leaks such a # resource, this resource will not be removed till the next reboot. Without # this resource tracker process, "killall python" would probably leave unlinked # resources. import os import signal import sys import threading import warnings from . import spawn from . import util __all__ = ['ensure_running', 'register', 'unregister'] _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) _CLEANUP_FUNCS = { 'noop': lambda: None, } if os.name == 'posix': try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import _posixshmem _CLEANUP_FUNCS.update({ 'semaphore': _multiprocessing.sem_unlink, 'shared_memory': _posixshmem.shm_unlink, }) class ResourceTracker(object): def __init__(self): self._lock = threading.Lock() self._fd = None self._pid = None def _stop(self): with self._lock: if self._fd is None: # not running return # closing the "alive" file descriptor stops main() os.close(self._fd) self._fd = None os.waitpid(self._pid, 0) self._pid = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that resource tracker process is running. This can be run from any process. Usually a child process will use the resource created by its parent.''' with self._lock: if self._fd is not None: # resource tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) # Clean-up to avoid dangling processes. try: # _pid can be None if this process is a child from another # python process, which has started the resource_tracker. if self._pid is not None: os.waitpid(self._pid, 0) except ChildProcessError: # The resource_tracker has already been terminated. pass self._fd = None self._pid = None warnings.warn('resource_tracker: process died unexpectedly, ' 'relaunching. Some resources might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from multiprocess.resource_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] # bpo-33613: Register a signal mask that will block the signals. # This signal mask will be inherited by the child that is going # to be spawned and will protect the child from a race condition # that can make the child die before it registers signal handlers # for SIGINT and SIGTERM. The mask is unregistered after spawning # the child. 
try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = util.spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r) def _check_alive(self): '''Check that the pipe has not been closed by sending a probe.''' try: # We cannot use send here as it calls ensure_running, creating # a cycle. os.write(self._fd, b'PROBE:0:noop\n') except OSError: return False else: return True def register(self, name, rtype): '''Register name of resource with resource tracker.''' self._send('REGISTER', name, rtype) def unregister(self, name, rtype): '''Unregister name of resource with resource tracker.''' self._send('UNREGISTER', name, rtype) def _send(self, cmd, name, rtype): self.ensure_running() msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') if len(name) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('name too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( nbytes, len(msg)) _resource_tracker = ResourceTracker() ensure_running = _resource_tracker.ensure_running register = _resource_tracker.register unregister = _resource_tracker.unregister getfd = _resource_tracker.getfd def main(fd): '''Run resource tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} try: # keep track of registered/unregistered resources with open(fd, 'rb') as f: for line in f: try: cmd, name, rtype = line.strip().decode('ascii').split(':') cleanup_func = _CLEANUP_FUNCS.get(rtype, None) if cleanup_func is None: raise ValueError( f'Cannot register {name} for automatic cleanup: ' f'unknown resource type {rtype}') if cmd == 'REGISTER': cache[rtype].add(name) elif cmd == 'UNREGISTER': cache[rtype].remove(name) elif cmd == 'PROBE': pass else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining resources for rtype, rtype_cache in cache.items(): if rtype_cache: try: warnings.warn('resource_tracker: There appear to be %d ' 'leaked %s objects to clean up at shutdown' % (len(rtype_cache), rtype)) except Exception: pass for name in rtype_cache: # For some reason the process which created and registered this # resource has failed to unregister it. Presumably it has # died. We therefore unlink it. try: try: _CLEANUP_FUNCS[rtype](name) except Exception as e: warnings.warn('resource_tracker: %r: %s' % (name, e)) finally: pass uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/shared_memory.py000066400000000000000000000420161455552142400261450ustar00rootroot00000000000000"""Provides shared memory for direct access across processes. The API of this package is currently provisional. Refer to the documentation for details. 
""" __all__ = [ 'SharedMemory', 'ShareableList' ] from functools import partial import mmap import os import errno import struct import secrets if os.name == "nt": import _winapi _USE_POSIX = False else: import _posixshmem _USE_POSIX = True _O_CREX = os.O_CREAT | os.O_EXCL # FreeBSD (and perhaps other BSDs) limit names to 14 characters. _SHM_SAFE_NAME_LENGTH = 14 # Shared memory block name prefix if _USE_POSIX: _SHM_NAME_PREFIX = '/psm_' else: _SHM_NAME_PREFIX = 'wnsm_' def _make_filename(): "Create a random filename for the shared memory object." # number of random bytes to use for name nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 assert nbytes >= 2, '_SHM_NAME_PREFIX too long' name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) assert len(name) <= _SHM_SAFE_NAME_LENGTH return name class SharedMemory: """Creates a new shared memory block or attaches to an existing shared memory block. Every shared memory block is assigned a unique name. This enables one process to create a shared memory block with a particular name so that a different process can attach to that same shared memory block using that same name. As a resource for sharing data across processes, shared memory blocks may outlive the original process that created them. When one process no longer needs access to a shared memory block that might still be needed by other processes, the close() method should be called. When a shared memory block is no longer needed by any process, the unlink() method should be called to ensure proper cleanup.""" # Defaults; enables close() and unlink() to run without errors. _name = None _fd = -1 _mmap = None _buf = None _flags = os.O_RDWR _mode = 0o600 _prepend_leading_slash = True if _USE_POSIX else False def __init__(self, name=None, create=False, size=0): if not size >= 0: raise ValueError("'size' must be a positive integer") if create: self._flags = _O_CREX | os.O_RDWR if size == 0: raise ValueError("'size' must be a positive number different from zero") if name is None and not self._flags & os.O_EXCL: raise ValueError("'name' can only be None if create=True") if _USE_POSIX: # POSIX Shared Memory if name is None: while True: name = _make_filename() try: self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) except FileExistsError: continue self._name = name break else: name = "/" + name if self._prepend_leading_slash else name self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) self._name = name try: if create and size: os.ftruncate(self._fd, size) stats = os.fstat(self._fd) size = stats.st_size self._mmap = mmap.mmap(self._fd, size) except OSError: self.unlink() raise from .resource_tracker import register register(self._name, "shared_memory") else: # Windows Named Shared Memory if create: while True: temp_name = _make_filename() if name is None else name # Create and reserve shared memory block with this name # until it can be attached to by mmap. 
h_map = _winapi.CreateFileMapping( _winapi.INVALID_HANDLE_VALUE, _winapi.NULL, _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF, size & 0xFFFFFFFF, temp_name ) try: last_error_code = _winapi.GetLastError() if last_error_code == _winapi.ERROR_ALREADY_EXISTS: if name is not None: raise FileExistsError( errno.EEXIST, os.strerror(errno.EEXIST), name, _winapi.ERROR_ALREADY_EXISTS ) else: continue self._mmap = mmap.mmap(-1, size, tagname=temp_name) finally: _winapi.CloseHandle(h_map) self._name = temp_name break else: self._name = name # Dynamically determine the existing named shared memory # block's size which is likely a multiple of mmap.PAGESIZE. h_map = _winapi.OpenFileMapping( _winapi.FILE_MAP_READ, False, name ) try: p_buf = _winapi.MapViewOfFile( h_map, _winapi.FILE_MAP_READ, 0, 0, 0 ) finally: _winapi.CloseHandle(h_map) size = _winapi.VirtualQuerySize(p_buf) self._mmap = mmap.mmap(-1, size, tagname=name) self._size = size self._buf = memoryview(self._mmap) def __del__(self): try: self.close() except OSError: pass def __reduce__(self): return ( self.__class__, ( self.name, False, self.size, ), ) def __repr__(self): return f'{self.__class__.__name__}({self.name!r}, size={self.size})' @property def buf(self): "A memoryview of contents of the shared memory block." return self._buf @property def name(self): "Unique name that identifies the shared memory block." reported_name = self._name if _USE_POSIX and self._prepend_leading_slash: if self._name.startswith("/"): reported_name = self._name[1:] return reported_name @property def size(self): "Size in bytes." return self._size def close(self): """Closes access to the shared memory from this instance but does not destroy the shared memory block.""" if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() self._mmap = None if _USE_POSIX and self._fd >= 0: os.close(self._fd) self._fd = -1 def unlink(self): """Requests that the underlying shared memory block be destroyed. In order to ensure proper cleanup of resources, unlink should be called once (and only once) across all processes which have access to the shared memory block.""" if _USE_POSIX and self._name: from .resource_tracker import unregister _posixshmem.shm_unlink(self._name) unregister(self._name, "shared_memory") _encoding = "utf8" class ShareableList: """Pattern for a mutable list-like object shareable via a shared memory block. It differs from the built-in list type in that these lists can not change their overall length (i.e. no append, insert, etc.) 
Because values are packed into a memoryview as bytes, the struct packing format for any storable value must require no more than 8 characters to describe its format.""" _types_mapping = { int: "q", float: "d", bool: "xxxxxxx?", str: "%ds", bytes: "%ds", None.__class__: "xxxxxx?x", } _alignment = 8 _back_transforms_mapping = { 0: lambda value: value, # int, float, bool 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str 2: lambda value: value.rstrip(b'\x00'), # bytes 3: lambda _value: None, # None } @staticmethod def _extract_recreation_code(value): """Used in concert with _back_transforms_mapping to convert values into the appropriate Python objects when retrieving them from the list as well as when storing them.""" if not isinstance(value, (str, bytes, None.__class__)): return 0 elif isinstance(value, str): return 1 elif isinstance(value, bytes): return 2 else: return 3 # NoneType def __init__(self, sequence=None, *, name=None): if sequence is not None: _formats = [ self._types_mapping[type(item)] if not isinstance(item, (str, bytes)) else self._types_mapping[type(item)] % ( self._alignment * (len(item) // self._alignment + 1), ) for item in sequence ] self._list_len = len(_formats) assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len self._allocated_bytes = tuple( self._alignment if fmt[-1] != "s" else int(fmt[:-1]) for fmt in _formats ) _recreation_codes = [ self._extract_recreation_code(item) for item in sequence ] requested_size = struct.calcsize( "q" + self._format_size_metainfo + "".join(_formats) + self._format_packing_metainfo + self._format_back_transform_codes ) else: requested_size = 8 # Some platforms require > 0. if name is not None and sequence is None: self.shm = SharedMemory(name) else: self.shm = SharedMemory(name, create=True, size=requested_size) if sequence is not None: _enc = _encoding struct.pack_into( "q" + self._format_size_metainfo, self.shm.buf, 0, self._list_len, *(self._allocated_bytes) ) struct.pack_into( "".join(_formats), self.shm.buf, self._offset_data_start, *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) ) struct.pack_into( self._format_packing_metainfo, self.shm.buf, self._offset_packing_formats, *(v.encode(_enc) for v in _formats) ) struct.pack_into( self._format_back_transform_codes, self.shm.buf, self._offset_back_transform_codes, *(_recreation_codes) ) else: self._list_len = len(self) # Obtains size from offset 0 in buffer. self._allocated_bytes = struct.unpack_from( self._format_size_metainfo, self.shm.buf, 1 * 8 ) def _get_packing_format(self, position): "Gets the packing format for a single value stored in the list." position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") v = struct.unpack_from( "8s", self.shm.buf, self._offset_packing_formats + position * 8 )[0] fmt = v.rstrip(b'\x00') fmt_as_str = fmt.decode(_encoding) return fmt_as_str def _get_back_transform(self, position): "Gets the back transformation function for a single value." 
position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") transform_code = struct.unpack_from( "b", self.shm.buf, self._offset_back_transform_codes + position )[0] transform_function = self._back_transforms_mapping[transform_code] return transform_function def _set_packing_format_and_transform(self, position, fmt_as_str, value): """Sets the packing format and back transformation code for a single value in the list at the specified position.""" position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") struct.pack_into( "8s", self.shm.buf, self._offset_packing_formats + position * 8, fmt_as_str.encode(_encoding) ) transform_code = self._extract_recreation_code(value) struct.pack_into( "b", self.shm.buf, self._offset_back_transform_codes + position, transform_code ) def __getitem__(self, position): try: offset = self._offset_data_start \ + sum(self._allocated_bytes[:position]) (v,) = struct.unpack_from( self._get_packing_format(position), self.shm.buf, offset ) except IndexError: raise IndexError("index out of range") back_transform = self._get_back_transform(position) v = back_transform(v) return v def __setitem__(self, position, value): try: offset = self._offset_data_start \ + sum(self._allocated_bytes[:position]) current_format = self._get_packing_format(position) except IndexError: raise IndexError("assignment index out of range") if not isinstance(value, (str, bytes)): new_format = self._types_mapping[type(value)] encoded_value = value else: encoded_value = (value.encode(_encoding) if isinstance(value, str) else value) if len(encoded_value) > self._allocated_bytes[position]: raise ValueError("bytes/str item exceeds available storage") if current_format[-1] == "s": new_format = current_format else: new_format = self._types_mapping[str] % ( self._allocated_bytes[position], ) self._set_packing_format_and_transform( position, new_format, value ) struct.pack_into(new_format, self.shm.buf, offset, encoded_value) def __reduce__(self): return partial(self.__class__, name=self.shm.name), () def __len__(self): return struct.unpack_from("q", self.shm.buf, 0)[0] def __repr__(self): return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' @property def format(self): "The struct packing format used by all currently stored values." return "".join( self._get_packing_format(i) for i in range(self._list_len) ) @property def _format_size_metainfo(self): "The struct packing format used for metainfo on storage sizes." return f"{self._list_len}q" @property def _format_packing_metainfo(self): "The struct packing format used for the values' packing formats." return "8s" * self._list_len @property def _format_back_transform_codes(self): "The struct packing format used for the values' back transforms." return "b" * self._list_len @property def _offset_data_start(self): return (self._list_len + 1) * 8 # 8 bytes per "q" @property def _offset_packing_formats(self): return self._offset_data_start + sum(self._allocated_bytes) @property def _offset_back_transform_codes(self): return self._offset_packing_formats + self._list_len * 8 def count(self, value): "L.count(value) -> integer -- return number of occurrences of value." return sum(value == entry for entry in self) def index(self, value): """L.index(value) -> integer -- return first index of value. 
Raises ValueError if the value is not present.""" for position, entry in enumerate(self): if value == entry: return position else: raise ValueError(f"{value!r} not in this container") uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/sharedctypes.py000066400000000000000000000142421455552142400260050ustar00rootroot00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import ctypes import weakref from . import heap from . import get_context from .context import reduction, assert_spawning _ForkingPickler = reduction.ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=True, ctx=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): ''' Return a synchronization wrapper for a RawArray ''' obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = {name: make_property(name) for name in names} classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, ctx) 
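# A minimal illustrative sketch of the SharedMemory and ShareableList classes
# defined in shared_memory.py above: create a block, attach to it by name,
# then unlink it exactly once. Sizes, contents and the list items are
# arbitrary illustration values.
if __name__ == '__main__':
    from multiprocess.shared_memory import SharedMemory, ShareableList

    shm = SharedMemory(create=True, size=16)
    shm.buf[:5] = b'hello'               # write through the memoryview
    same = SharedMemory(name=shm.name)   # attach from this (or another) process
    print(bytes(same.buf[:5]))           # b'hello'
    same.close()
    shm.close()
    shm.unlink()                         # call once, in one process only

    sl = ShareableList(['spam', 42, None])
    sl[1] = 43                           # fixed length, but items are mutable
    print(list(sl))                      # ['spam', 43, None]
    sl.shm.close()
    sl.shm.unlink()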
# # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length _ForkingPickler.register(type_, reduce_ctype) buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/spawn.py000066400000000000000000000221151455552142400244350ustar00rootroot00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import sys import runpy import types from . import get_start_method, set_start_method from . import process from .context import reduction from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
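# A minimal illustrative sketch of the Value and Array wrappers defined in
# sharedctypes.py above, which hand out ctypes objects backed by shared
# memory behind a synchronized proxy. It assumes only that `multiprocess` is
# importable; the worker function and typecodes are illustration choices.
def _bump(counter, values):
    with counter.get_lock():             # the RLock attached by synchronized()
        counter.value += 1
    for i in range(len(values)):
        values[i] *= 2

if __name__ == '__main__':
    import multiprocess as mp

    counter = mp.Value('i', 0)           # typecode 'i' -> ctypes.c_int
    values = mp.Array('d', [1.0, 2.0])   # typecode 'd' -> ctypes.c_double
    p = mp.Process(target=_bump, args=(counter, values))
    p.start()
    p.join()
    print(counter.value, values[:])      # 1 [2.0, 4.0]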
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = getattr(sys, 'frozen', False) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe def get_executable(): return _python_exe # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--multiprocessing-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv), "Not forking" if sys.platform == 'win32': import msvcrt import _winapi if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid) else: source_process = None new_handle = reduction.duplicate(pipe_handle, source_process=source_process) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process else: from . import resource_tracker resource_tracker._resource_tracker._fd = tracker_fd fd = pipe_handle parent_sentinel = os.dup(pipe_handle) exitcode = _main(fd, parent_sentinel) sys.exit(exitcode) def _main(fd, parent_sentinel): with os.fdopen(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = reduction.pickle.load(from_parent) prepare(preparation_data) self = reduction.pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap(parent_sentinel) def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... 
The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable.''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path=sys.path.copy() try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] main_mod_name = getattr(main_module.__spec__, "name", None) if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. 
We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/synchronize.py000066400000000000000000000270651455552142400256710ustar00rootroot00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import sys import tempfile try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import time from . import context from . import process from . import util # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
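# Illustrative aside, not part of this module: callers (including the bundled
# test suite) can probe whether these primitives are usable simply by
# attempting the import described above; the helper name is hypothetical.
def _have_working_sem_open():
    try:
        import multiprocess.synchronize  # the import itself is the probe
    except ImportError:
        return False
    return True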
# See issue 3770 try: from _multiprocess import SemLock, sem_unlink except ImportError: try: from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, *, ctx): if ctx is None: ctx = context._default_context.get_context() name = ctx.get_start_method() unlink_now = sys.platform == 'win32' or name == 'fork' for i in range(100): try: sl = self._semlock = _multiprocessing.SemLock( kind, value, maxvalue, self._make_name(), unlink_now) except FileExistsError: pass else: break else: raise FileExistsError('cannot find name for semaphore') util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if self._semlock.name is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .resource_tracker import register register(self._semlock.name, "semaphore") util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .resource_tracker import unregister sem_unlink(name) unregister(name, "semaphore") def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: h = sl.handle return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % \ (self.__class__.__name__, value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 
'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) # # Recursive lock # class RLock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None, *, ctx): self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def notify(self, n=1): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire( False), ('notify: Should not have been able to acquire ' + '_wait_semaphore') # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res, ('notify: Bug in sleeping_count.acquire' + '- res should not be False') sleepers = 0 while sleepers < n and self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def notify_all(self): self.notify(n=sys.maxsize) def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() 
return result # # Event # class Event(object): def __init__(self, *, ctx): self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False # # Barrier # class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, *, ctx): import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): self._array[1] = value uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/tests/000077500000000000000000000000001455552142400240745ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/tests/__init__.py000066400000000000000000005610231455552142400262140ustar00rootroot00000000000000# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import time import io import itertools import sys import os import gc import errno import signal import array import socket import random import logging import subprocess import struct import operator import pickle #XXX: use dill? import weakref import warnings import test.support import test.support.script_helper from test import support # Skip tests if _multiprocessing wasn't built. _multiprocessing = test.support.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. 
test.support.import_module('multiprocess.synchronize') import threading import multiprocess as multiprocessing import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocess import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None # # # # Don't ignore user's installed packages ENV = dict(__cleanenv = False, __isolated = False) # Timeout to wait until a process completes #XXX: travis-ci TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused support.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocess import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocess.connection import wait def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." 
% nsems_min) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = getattr(time,'monotonic',time.time)() try: return self.func(*args, **kwds) finally: self.elapsed = getattr(time,'monotonic',time.time)() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocess.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def _test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
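# Illustrative aside, not part of this test: parent_process() can be used
# directly by application code; a child sees a handle on its parent, while
# the top-level process sees None.  Names below are hypothetical, and
# multiprocess mirrors the stdlib multiprocessing API here.
import multiprocess as _mp_parent_example


def _report_parent_example():
    _parent = _mp_parent_example.parent_process()
    print('parent pid:', None if _parent is None else _parent.pid)


if __name__ == '__main__':
    _child = _mp_parent_example.Process(target=_report_parent_example)
    _child.start()
    _child.join()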
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=60): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=60): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocess.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=5) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. # We use alarm() to interrupt it if it blocks for too long. 
def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() travis = os.environ.get('COVERAGE') #XXX: travis-ci N = (1 if travis else 5) if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: p.start() for p in procs: join_process(p) for p in procs: self.assertEqual(p.exitcode, 0) procs = 
[self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.001) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(test.support.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) @classmethod def _sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # 
bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, 'r') as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("__init__.py", err) self.assertIn("1/0 # MARKER", err) @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, 1) with open(testfn, 'r') as f: content = f.read() self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) for reason in (True, False, 8): p = 
self.Process(target=sys.exit, args=(reason,)) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, reason) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) 
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with test.support.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w') as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with test.support.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = getattr(time,'monotonic',time.time)() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = getattr(time,'monotonic',time.time)() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once # failed because the delta was only 135.8 ms. self.assertGreaterEqual(delta, 0.100) close_queue(q) def test_queue_feeder_donot_stop_onexc(self): # bpo-30414: verify feeder handles exceptions correctly if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): def __reduce__(self): raise AttributeError with test.support.captured_stderr(): q = self.Queue() q.put(NotSerializable()) q.put(True) self.assertTrue(q.get(timeout=TIMEOUT)) close_queue(q) with test.support.captured_stderr(): # bpo-33078: verify that the queue size is correctly handled # on errors. 
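# Illustrative aside, not part of this test: an object that fails to
# serialize is dropped by the feeder thread (with a logged traceback) while
# the queue itself keeps working, which is what the surrounding assertions
# check.  The class and variable names below are hypothetical.
import multiprocess as _mp_queue_example


class _UnpicklableExample:
    def __reduce__(self):
        raise AttributeError('deliberately not serializable')


if __name__ == '__main__':
    _q = _mp_queue_example.Queue()
    _q.put(_UnpicklableExample())   # dropped by the feeder, error is logged
    _q.put('still works')
    print(_q.get(timeout=5))        # -> 'still works'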
q = self.Queue(maxsize=1) q.put(NotSerializable()) q.put(True) try: self.assertEqual(q.qsize(), 1) except NotImplementedError: # qsize is not available on all platform as it # relies on sem_getvalue pass # bpo-30595: use a timeout of 1 second for slow buildbots self.assertTrue(q.get(timeout=1.0)) # Check that the size of the queue is correct self.assertTrue(q.empty()) close_queue(q) def test_queue_feeder_on_queue_feeder_error(self): # bpo-30006: verify feeder handles exceptions using the # _on_queue_feeder_error hook. if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): """Mock unserializable object""" def __init__(self): self.reduce_was_called = False self.on_queue_feeder_error_was_called = False def __reduce__(self): self.reduce_was_called = True raise AttributeError class SafeQueue(multiprocessing.queues.Queue): """Queue with overloaded _on_queue_feeder_error hook""" @staticmethod def _on_queue_feeder_error(e, obj): if (isinstance(e, AttributeError) and isinstance(obj, NotSerializable)): obj.on_queue_feeder_error_was_called = True not_serializable_obj = NotSerializable() # The captured_stderr reduces the noise in the test report with test.support.captured_stderr(): q = SafeQueue(ctx=multiprocessing.get_context()) q.put(not_serializable_obj) # Verify that q is still functioning correctly q.put(True) self.assertTrue(q.get(timeout=1.0)) # Assert that the serialization and the hook have been called correctly self.assertTrue(not_serializable_obj.reduce_was_called) self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) def test_closed_queue_put_get_exceptions(self): for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): q.close() with self.assertRaisesRegex(ValueError, 'is closed'): q.put('foo') with self.assertRaisesRegex(ValueError, 'is closed'): q.get() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # 
self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def assertReachesEventually(self, func, value): for i in range(10): try: if func() == value: break except NotImplementedError: break time.sleep(DELTA) time.sleep(DELTA) self.assertReturnsIfImplemented(value, func) def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() 
cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.1 dt = getattr(time,'monotonic',time.time)() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = getattr(time,'monotonic',time.time)() - dt # borrow logic in assertTimeout() from test/lock_tests.py if not result and expected * 0.6 < dt < expected * 10.0: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
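# Illustrative aside, not part of this test: wait_for() re-checks the
# predicate until it holds or the timeout elapses, and returns the
# predicate's final value.  Names below are hypothetical; multiprocess
# mirrors the stdlib multiprocessing API here.
import multiprocess as _mp_cond_example

if __name__ == '__main__':
    _cond = _mp_cond_example.Condition()
    _state = _mp_cond_example.Value('i', 0)
    with _cond:
        _ok = _cond.wait_for(lambda: _state.value == 1, timeout=0.1)
    print(_ok)   # -> False, since nothing ever sets _state to 1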
for i in range(3): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertTrue(success.value) @classmethod def _test_wait_result(cls, c, pid): with c: c.notify() time.sleep(1) if pid is not None: os.kill(pid, signal.SIGINT) def test_wait_result(self): if isinstance(self, ProcessesMixin) and sys.platform != 'win32': pid = os.getpid() else: pid = None c = self.Condition() with c: self.assertFalse(c.wait(0)) self.assertFalse(c.wait(0.1)) p = self.Process(target=self._test_wait_result, args=(c, pid)) p.start() self.assertTrue(c.wait(60)) if pid is not None: self.assertRaises(KeyboardInterrupt, c.wait, 60) p.join() class _TestEvent(BaseTestCase): @classmethod def _test_event(cls, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporarily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) p = self.Process(target=self._test_event, args=(event,)) p.daemon = True p.start() self.assertEqual(wait(), True) p.join() # # Tests for Barrier - adapted from tests in test/lock_tests.py # # Many of the tests for threading.Barrier use a list as an atomic # counter: a value is appended to increment the counter, and the # length of the list gives the value. We use the class DummyList # for the same purpose. class _DummyList(object): def __init__(self): wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) lock = multiprocessing.Lock() self.__setstate__((wrapper, lock)) self._lengthbuf[0] = 0 def __setstate__(self, state): (self._wrapper, self._lock) = state self._lengthbuf = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._wrapper, self._lock) def append(self, _): with self._lock: self._lengthbuf[0] += 1 def __len__(self): with self._lock: return self._lengthbuf[0] def _wait(): # A crude wait/yield function not relying on synchronization primitives. time.sleep(0.01) class Bunch(object): """ A bunch of threads. """ def __init__(self, namespace, f, args, n, wait_before_exit=False): """ Construct a bunch of `n` threads running the same function `f`. If `wait_before_exit` is True, the threads won't terminate until do_finish() is called. 
""" self.f = f self.args = args self.n = n self.started = namespace.DummyList() self.finished = namespace.DummyList() self._can_exit = namespace.Event() if not wait_before_exit: self._can_exit.set() threads = [] for i in range(n): p = namespace.Process(target=self.task) p.daemon = True p.start() threads.append(p) def finalize(threads): for p in threads: p.join() self._finalizer = weakref.finalize(self, finalize, threads) def task(self): pid = os.getpid() self.started.append(pid) try: self.f(*self.args) finally: self.finished.append(pid) self._can_exit.wait(30) assert self._can_exit.is_set() def wait_for_started(self): while len(self.started) < self.n: _wait() def wait_for_finished(self): while len(self.finished) < self.n: _wait() def do_finish(self): self._can_exit.set() def close(self): self._finalizer() class AppendTrue(object): def __init__(self, obj): self.obj = obj def __call__(self): self.obj.append(True) class _TestBarrier(BaseTestCase): """ Tests for Barrier objects. """ N = 5 defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout def setUp(self): self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) def tearDown(self): self.barrier.abort() self.barrier = None def DummyList(self): if self.TYPE == 'threads': return [] elif self.TYPE == 'manager': return self.manager.list() else: return _DummyList() def run_threads(self, f, args): b = Bunch(self, f, args, self.N-1) try: f(*args) b.wait_for_finished() finally: b.close() @classmethod def multipass(cls, barrier, results, n): m = barrier.parties assert m == cls.N for i in range(n): results[0].append(True) assert len(results[1]) == i * m barrier.wait() results[1].append(True) assert len(results[0]) == (i + 1) * m barrier.wait() try: assert barrier.n_waiting == 0 except NotImplementedError: pass assert not barrier.broken def test_barrier(self, passes=1): """ Test that a barrier is passed in lockstep """ results = [self.DummyList(), self.DummyList()] self.run_threads(self.multipass, (self.barrier, results, passes)) def test_barrier_10(self): """ Test that a barrier works for 10 consecutive runs """ return self.test_barrier(10) @classmethod def _test_wait_return_f(cls, barrier, queue): res = barrier.wait() queue.put(res) def test_wait_return(self): """ test the return value from barrier.wait """ queue = self.Queue() self.run_threads(self._test_wait_return_f, (self.barrier, queue)) results = [queue.get() for i in range(self.N)] self.assertEqual(results.count(0), 1) close_queue(queue) @classmethod def _test_action_f(cls, barrier, results): barrier.wait() if len(results) != 1: raise RuntimeError def test_action(self): """ Test the 'action' callback """ results = self.DummyList() barrier = self.Barrier(self.N, action=AppendTrue(results)) self.run_threads(self._test_action_f, (barrier, results)) self.assertEqual(len(results), 1) @classmethod def _test_abort_f(cls, barrier, results1, results2): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() def test_abort(self): """ Test that an abort will put the barrier in a broken state """ results1 = self.DummyList() results2 = self.DummyList() self.run_threads(self._test_abort_f, (self.barrier, results1, results2)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertTrue(self.barrier.broken) @classmethod def _test_reset_f(cls, barrier, results1, results2, results3): i = barrier.wait() if i == 
cls.N//2: # Wait until the other threads are all in the barrier. while barrier.n_waiting < cls.N-1: time.sleep(0.001) barrier.reset() else: try: barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) # Now, pass the barrier again barrier.wait() results3.append(True) def test_reset(self): """ Test that a 'reset' on a barrier frees the waiting threads """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() self.run_threads(self._test_reset_f, (self.barrier, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_abort_and_reset_f(cls, barrier, barrier2, results1, results2, results3): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() # Synchronize and reset the barrier. Must synchronize first so # that everyone has left it when we reset, and after so that no # one enters it before the reset. if barrier2.wait() == cls.N//2: barrier.reset() barrier2.wait() barrier.wait() results3.append(True) def test_abort_and_reset(self): """ Test that a barrier can be reset after being broken. """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() barrier2 = self.Barrier(self.N) self.run_threads(self._test_abort_and_reset_f, (self.barrier, barrier2, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_timeout_f(cls, barrier, results): i = barrier.wait() if i == cls.N//2: # One thread is late! time.sleep(1.0) try: barrier.wait(0.5) except threading.BrokenBarrierError: results.append(True) def test_timeout(self): """ Test wait(timeout) """ results = self.DummyList() self.run_threads(self._test_timeout_f, (self.barrier, results)) self.assertEqual(len(results), self.barrier.parties) @classmethod def _test_default_timeout_f(cls, barrier, results): i = barrier.wait(cls.defaultTimeout) if i == cls.N//2: # One thread is later than the default timeout time.sleep(1.0) try: barrier.wait() except threading.BrokenBarrierError: results.append(True) def test_default_timeout(self): """ Test the barrier's default timeout """ barrier = self.Barrier(self.N, timeout=0.5) results = self.DummyList() self.run_threads(self._test_default_timeout_f, (barrier, results)) self.assertEqual(len(results), barrier.parties) def test_single_thread(self): b = self.Barrier(1) b.wait() b.wait() @classmethod def _test_thousand_f(cls, barrier, passes, conn, lock): for i in range(passes): barrier.wait() with lock: conn.send(i) def test_thousand(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) passes = 1000 lock = self.Lock() conn, child_conn = self.Pipe(False) for j in range(self.N): p = self.Process(target=self._test_thousand_f, args=(self.barrier, passes, child_conn, lock)) p.start() self.addCleanup(p.join) for i in range(passes): for j in range(self.N): self.assertEqual(conn.recv(), i) # # # class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('q', 2 ** 33, 2 ** 34), ('c', latin('x'), latin('y')) ] def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _test(cls, values): for sv, cv 
in zip(values, cls.codes_values): sv.value = cv[2] def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.daemon = True proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) def test_rawvalue(self): self.test_value(raw=True) def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() obj1 = val1.get_obj() val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() obj2 = val2.get_obj() lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() obj3 = val3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def f(cls, seq): for i in range(1, len(seq)): seq[i] += seq[i-1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.daemon = True p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_array_from_size(self): size = 10 # Test for zeroing (see issue #11675). # The repetition below strengthens the test by increasing the chances # of previously allocated non-zero memory being used for the new array # on the 2nd and 3rd loops. 
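#
# Illustrative sketch (not part of the test module): minimal standalone use of
# the multiprocess.Value/Array shared-ctypes objects exercised by _TestValue
# and _TestArray above.  The helper name `bump` is illustrative only.

from multiprocess import Process, Value, Array

def bump(counter, data):
    with counter.get_lock():        # Value('i', ...) carries a per-object lock
        counter.value += 1
    for i in range(len(data)):      # Array supports len() and item assignment
        data[i] *= 2

if __name__ == '__main__':
    counter = Value('i', 0)
    data = Array('d', [0.5, 1.5, 2.5])
    p = Process(target=bump, args=(counter, data))
    p.start()
    p.join()
    assert counter.value == 1
    assert list(data) == [1.0, 3.0, 5.0]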
for _ in range(3): arr = self.Array('i', size) self.assertEqual(len(arr), size) self.assertEqual(list(arr), [0] * size) arr[:] = range(10) self.assertEqual(list(arr), list(range(10))) del arr @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', list(range(10))) lock1 = arr1.get_lock() obj1 = arr1.get_obj() arr2 = self.Array('i', list(range(10)), lock=None) lock2 = arr2.get_lock() obj2 = arr2.get_obj() lock = self.Lock() arr3 = self.Array('i', list(range(10)), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) # # # class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(list(range(10))) self.assertEqual(a[:], list(range(10))) b = self.list() self.assertEqual(b[:], []) b.extend(list(range(5))) self.assertEqual(b[:], list(range(5))) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2,3,4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], list(range(10))) d = [a, b] e = self.list(d) self.assertEqual( [element[:] for element in e], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) def test_list_iter(self): a = self.list(list(range(10))) it = iter(a) self.assertEqual(list(it), list(range(10))) self.assertEqual(list(it), []) # exhausted # list modified during iteration it = iter(a) a[0] = 100 self.assertEqual(next(it), 100) def test_list_proxy_in_list(self): a = self.list([self.list(range(3)) for _i in range(3)]) self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) a[0][-1] = 55 self.assertEqual(a[0][:], [0, 1, 55]) for i in range(1, 3): self.assertEqual(a[i][:], [0, 1, 2]) self.assertEqual(a[1].pop(), 2) self.assertEqual(len(a[1]), 2) for i in range(0, 3, 2): self.assertEqual(len(a[i]), 3) del a b = self.list() b.append(b) del b def test_dict(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) def test_dict_iter(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) it = iter(d) self.assertEqual(list(it), indices) self.assertEqual(list(it), []) # exhausted # dictionary changed size during iteration it = iter(d) d.clear() self.assertRaises(RuntimeError, next, it) def test_dict_proxy_nested(self): pets = self.dict(ferrets=2, hamsters=4) supplies = self.dict(water=10, feed=3) d = self.dict(pets=pets, supplies=supplies) self.assertEqual(supplies['water'], 10) self.assertEqual(d['supplies']['water'], 10) d['supplies']['blankets'] = 5 self.assertEqual(supplies['blankets'], 5) self.assertEqual(d['supplies']['blankets'], 5) d['supplies']['water'] = 7 self.assertEqual(supplies['water'], 7) 
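#
# Illustrative sketch (not part of the test module): the manager-backed list
# and dict proxies exercised by _TestContainers above.  `record` is an
# illustrative helper name.

from multiprocess import Process, Manager

def record(shared_dict, shared_list):
    shared_dict['answer'] = 42
    shared_list.append('done')

if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()
        lst = manager.list(range(3))
        p = Process(target=record, args=(d, lst))
        p.start()
        p.join()
        assert d['answer'] == 42
        assert lst[:] == [0, 1, 2, 'done']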
self.assertEqual(d['supplies']['water'], 7) del pets del supplies self.assertEqual(d['pets']['ferrets'], 2) d['supplies']['blankets'] = 11 self.assertEqual(d['supplies']['blankets'], 11) pets = d['pets'] supplies = d['supplies'] supplies['water'] = 7 self.assertEqual(supplies['water'], 7) self.assertEqual(d['supplies']['water'], 7) d.clear() self.assertEqual(len(d), 0) self.assertEqual(supplies['water'], 7) self.assertEqual(pets['hamsters'], 4) l = self.list([pets, supplies]) l[0]['marmots'] = 1 self.assertEqual(pets['marmots'], 1) self.assertEqual(l[0]['marmots'], 1) del pets del supplies self.assertEqual(l[0]['marmots'], 1) outer = self.list([[88, 99], l]) self.assertIsInstance(outer[0], list) # Not a ListProxy self.assertEqual(outer[-1][-1]['feed'], 3) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) # # # def sqr(x, wait=0.0): time.sleep(wait) return x*x def mul(x, y): return x*y def raise_large_valuerror(wait): time.sleep(wait) raise ValueError("x" * 1024**2) def identity(x): return x class CountedObject(object): n_instances = 0 def __new__(cls): cls.n_instances += 1 return object.__new__(cls) def __del__(self): type(self).n_instances -= 1 class SayWhenError(ValueError): pass def exception_throwing_generator(total, when): if when == -1: raise SayWhenError("Somebody said when") for i in range(total): if i == when: raise SayWhenError("Somebody said when") yield i class _TestPool(BaseTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.pool = cls.Pool(4) @classmethod def tearDownClass(cls): cls.pool.terminate() cls.pool.join() cls.pool = None super().tearDownClass() def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), list(map(sqr, list(range(100))))) def test_starmap(self): psmap = self.pool.starmap tuples = list(zip(range(10), range(9,-1, -1))) self.assertEqual(psmap(mul, tuples), list(itertools.starmap(mul, tuples))) tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(psmap(mul, tuples, chunksize=20), list(itertools.starmap(mul, tuples))) def test_starmap_async(self): tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(self.pool.starmap_async(mul, tuples).get(), list(itertools.starmap(mul, tuples))) def test_map_async(self): self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), list(map(sqr, list(range(10))))) def test_map_async_callbacks(self): call_args = self.manager.list() if self.TYPE == 'manager' else [] self.pool.map_async(int, ['1'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(1, len(call_args)) self.assertEqual([1], call_args[0]) self.pool.map_async(int, ['a'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(2, len(call_args)) self.assertIsInstance(call_args[1], ValueError) def test_map_unplicklable(self): # Issue #19425 -- failure to pickle should not cause a hang if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class A(object): def __reduce__(self): raise RuntimeError('cannot pickle') with 
self.assertRaises(RuntimeError): self.pool.map(sqr, [A()]*10) def test_map_chunksize(self): try: self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) except multiprocessing.TimeoutError: self.fail("pool.map_async with chunksize stalled on null list") def test_map_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) # again, make sure it's reentrant with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(10, 3), 1) class SpecialIterable: def __iter__(self): return self def __next__(self): raise SayWhenError def __len__(self): return 1 with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) get = TimingWrapper(res.get) self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, list(range(10))) self.assertEqual(list(it), list(map(sqr, list(range(10))))) it = self.pool.imap(sqr, list(range(10))) for i in range(10): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) it = self.pool.imap(sqr, list(range(1000)), chunksize=100) for i in range(1000): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) def test_imap_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) for i in range(3): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) # SayWhenError seen at start of problematic chunk's results it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) for i in range(6): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) for i in range(4): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, list(range(10))) self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) def test_imap_unordered_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap_unordered(sqr, 
exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap_unordered(sqr, exception_throwing_generator(10, 3), 1) expected_values = list(map(sqr, list(range(10)))) with self.assertRaises(SayWhenError): # imap_unordered makes it difficult to anticipate the SayWhenError for i in range(10): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) it = self.pool.imap_unordered(sqr, exception_throwing_generator(20, 7), 2) expected_values = list(map(sqr, list(range(20)))) with self.assertRaises(SayWhenError): for i in range(20): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) def test_make_pool(self): expected_error = (RemoteError if self.TYPE == 'manager' else ValueError) self.assertRaises(expected_error, self.Pool, -1) self.assertRaises(expected_error, self.Pool, 0) if self.TYPE != 'manager': p = self.Pool(3) try: self.assertEqual(3, len(p._pool)) finally: p.close() p.join() def test_terminate(self): result = self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() # Sanity check the pool didn't wait for all tasks to finish self.assertLess(join.elapsed, 2.0) def test_empty_iterable(self): # See Issue 12157 p = self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) p.close() p.join() def test_context(self): if self.TYPE == 'processes': L = list(range(10)) expected = [sqr(i) for i in L] with self.Pool(2) as p: r = p.map_async(sqr, L) self.assertEqual(r.get(), expected) p.join() self.assertRaises(ValueError, p.map_async, sqr, L) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_traceback(self): # We want ensure that the traceback from the child process is # contained in the traceback raised in the main process. 
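#
# Illustrative sketch (not part of the test module): everyday Pool usage of the
# kind _TestPool above exercises -- map(), imap_unordered() and apply_async().
# `square` is an illustrative helper name.

from multiprocess import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    with Pool(4) as pool:
        assert pool.map(square, range(10)) == [x * x for x in range(10)]
        assert sorted(pool.imap_unordered(square, range(5))) == [0, 1, 4, 9, 16]
        result = pool.apply_async(square, (7,))
        assert result.get(timeout=10) == 49
    pool.join()                       # the with-block calls terminate()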
if self.TYPE == 'processes': with self.Pool(1) as p: try: p.apply(self._test_traceback) except Exception as e: exc = e else: self.fail('expected RuntimeError') p.join() self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with test.support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) # _helper_reraises_exception should not make the error # a remote exception with self.Pool(1) as p: try: p.map(sqr, exception_throwing_generator(1, -1), 1) except Exception as e: exc = e else: self.fail('expected SayWhenError') self.assertIs(type(exc), SayWhenError) self.assertIs(exc.__cause__, None) p.join() @classmethod def _test_wrapped_exception(cls): raise RuntimeError('foo') @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_wrapped_exception(self): # Issue #20980: Should not wrap exception when using thread pool with self.Pool(1) as p: with self.assertRaises(RuntimeError): p.apply(self._test_wrapped_exception) p.join() def test_map_no_failfast(self): # Issue #23992: the fail-fast behaviour when an exception is raised # during map() would make Pool.join() deadlock, because a worker # process would fill the result queue (after the result handler thread # terminated, hence not draining it anymore). t_start = getattr(time,'monotonic',time.time)() with self.assertRaises(ValueError): with self.Pool(2) as p: try: p.map(raise_large_valuerror, [0, 1]) finally: time.sleep(0.5) p.close() p.join() # check that we indeed waited for all jobs self.assertGreater(getattr(time,'monotonic',time.time)() - t_start, 0.9) def test_release_task_refs(self): # Issue #29861: task arguments and results should not be kept # alive after we are done with them. objs = [CountedObject() for i in range(10)] refs = [weakref.ref(o) for o in objs] self.pool.map(identity, objs) del objs time.sleep(DELTA) # let threaded cleanup code run self.assertEqual(set(wr() for wr in refs), {None}) # With a process pool, copies of the objects are returned, check # they were released too. 
self.assertEqual(CountedObject.n_instances, 0) def test_enter(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) with pool: pass # call pool.terminate() # pool is no longer running with self.assertRaises(ValueError): # bpo-35477: pool.__enter__() fails if the pool is not running with pool: pass pool.join() def test_resource_warning(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) pool.terminate() pool.join() # force state to RUN to emit ResourceWarning in __del__() pool._state = multiprocessing.pool.RUN with support.check_warnings(('unclosed running multiprocessing pool', ResourceWarning)): pool = None support.gc_collect() def raising(): raise KeyError("key") def unpickleable_result(): return lambda: 42 class _TestPoolWorkerErrors(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_async_error_callback(self): p = multiprocessing.Pool(2) scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(raising, error_callback=errback) self.assertRaises(KeyError, res.get) self.assertTrue(scratchpad[0]) self.assertIsInstance(scratchpad[0], KeyError) p.close() p.join() def _test_unpickleable_result(self): from multiprocess.pool import MaybeEncodingError p = multiprocessing.Pool(2) # Make sure we don't lose pool processes because of encoding errors. for iteration in range(20): scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(unpickleable_result, error_callback=errback) self.assertRaises(MaybeEncodingError, res.get) wrapped = scratchpad[0] self.assertTrue(wrapped) self.assertIsInstance(scratchpad[0], MaybeEncodingError) self.assertIsNotNone(wrapped.exc) self.assertIsNotNone(wrapped.value) p.close() p.join() class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): p = multiprocessing.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive # (countdown * DELTA = 5 seconds max startup process time) countdown = 50 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [w.pid for w in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() def test_pool_worker_lifetime_early_close(self): # Issue #10332: closing a pool whose workers have limited lifetimes # before all the tasks completed would make join() hang. 
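#
# Illustrative sketch (not part of the test module): the error_callback and
# maxtasksperchild features exercised by _TestPoolWorkerErrors and
# _TestPoolWorkerLifetime above.  `might_fail` is an illustrative helper name.

from multiprocess import Pool

def might_fail(x):
    if x == 3:
        raise ValueError('boom')
    return x

if __name__ == '__main__':
    errors = []
    # each worker is recycled after 5 tasks; failures reach error_callback
    with Pool(2, maxtasksperchild=5) as pool:
        res = pool.apply_async(might_fail, (3,), error_callback=errors.append)
        try:
            res.get(timeout=10)       # re-raises the worker's ValueError here
        except ValueError:
            pass
    assert len(errors) == 1 and isinstance(errors[0], ValueError)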
p = multiprocessing.Pool(3, maxtasksperchild=1) results = [] for i in range(6): results.append(p.apply_async(sqr, (i, 0.3))) p.close() p.join() # check the results for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): # tests cases against bpo-38744 and bpo-39360 cmd = '''if 1: from multiprocess import Pool problem = None class A: def __init__(self): self.pool = Pool(processes=1) def test(): global problem problem = A() problem.pool.map(float, tuple(range(10))) if __name__ == "__main__": test() ''' rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) self.assertEqual(rc, 0) # # Test of creating a customized manager class # from multiprocess.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in range(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('__next__',) def __iter__(self): return self def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() self.common(manager) manager.shutdown() # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context(self): with MyManager() as manager: self.common(manager) # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. 
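#
# Illustrative sketch (not part of the test module): registering a class with a
# customized BaseManager, the pattern _TestMyManager above drives through
# MyManager/FooBar.  `Counter` and `CounterManager` are illustrative names.

from multiprocess.managers import BaseManager

class Counter(object):
    def __init__(self):
        self._n = 0
    def increment(self):
        self._n += 1
        return self._n

class CounterManager(BaseManager):
    pass

CounterManager.register('Counter', callable=Counter)

if __name__ == '__main__':
    with CounterManager() as manager:       # starts the server process
        counter = manager.Counter()         # returns a proxy to a Counter
        assert counter.increment() == 1
        assert counter.increment() == 2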
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context_prestarted(self): manager = MyManager() manager.start() with manager: self.common(manager) self.assertEqual(manager._process.exitcode, 0) def common(self, manager): foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = pyqueue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) values = ['hello world', None, True, 2.25, 'hall\xe5 v\xe4rlden', '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', b'hall\xe5 v\xe4rlden', ] result = values[:] @classmethod def _putter(cls, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple queue.put(tuple(cls.values)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER ) manager.start() self.addCleanup(manager.shutdown) p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() self.assertEqual(queue.get(), self.result) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER) try: srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. 
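#
# Illustrative sketch (not part of the test module): sharing a queue between a
# serving manager and a connecting client, the pattern QueueManager /
# QueueManager2 above use (here with the default pickle serializer rather than
# xmlrpclib).  `get_jobs`, `ServerManager` and `ClientManager` are illustrative.

import queue
from multiprocess.managers import BaseManager

_jobs = queue.Queue()

def get_jobs():
    return _jobs

class ServerManager(BaseManager):
    pass

ServerManager.register('get_jobs', callable=get_jobs)

class ClientManager(BaseManager):
    pass

ClientManager.register('get_jobs')           # same typeid, no callable needed

if __name__ == '__main__':
    server = ServerManager(address=('127.0.0.1', 0), authkey=b'secret')
    server.start()                            # address is filled in after start()
    client = ClientManager(address=server.address, authkey=b'secret')
    client.connect()
    client.get_jobs().put('hello')
    assert server.get_jobs().get(timeout=10) == 'hello'
    server.shutdown()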
srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() p.join() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue finally: if hasattr(manager, "shutdown"): manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) try: manager.start() self.addCleanup(manager.shutdown) except OSError as e: if e.errno != errno.EADDRINUSE: raise # Retry after some time, in case the old socket was lingering # (sporadic failure on buildbots) time.sleep(1.0) manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) if hasattr(manager, "shutdown"): self.addCleanup(manager.shutdown) # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', list(range(4))) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocessing.BufferTooShort as e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(-1), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) time.sleep(.1) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(OSError, reader.send, 2) self.assertRaises(OSError, writer.recv) self.assertRaises(OSError, writer.poll) def test_spawn_close(self): # We 
test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) reduction.send_handle(conn, fd, p.pid) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") 
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. 
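#
# Illustrative sketch (not part of the test module): the connection.Listener /
# connection.Client pair exercised by _TestListener and _TestListenerClient
# above.  `greet` is an illustrative helper name.

from multiprocess import Process
from multiprocess.connection import Listener, Client

def greet(address, authkey):
    with Client(address, authkey=authkey) as conn:
        conn.send('hello')

if __name__ == '__main__':
    with Listener(('127.0.0.1', 0), authkey=b'secret') as listener:
        p = Process(target=greet, args=(listener.address, b'secret'))
        p.start()
        with listener.accept() as conn:       # blocks until the client connects
            assert conn.recv() == 'hello'
        p.join()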
conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((test.support.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() 
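#
# Illustrative sketch (not part of the test module): plain Pipe endpoints and
# poll(), as exercised by _TestConnection and _TestPoll above.  `child` is an
# illustrative helper name.

from multiprocess import Process, Pipe

def child(conn):
    conn.send_bytes(b'ping')
    conn.send({'status': 'ok'})               # picklable objects also work
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=child, args=(child_conn,))
    p.start()
    assert parent_conn.poll(5)                # wait up to 5s for data to arrive
    assert parent_conn.recv_bytes() == b'ping'
    assert parent_conn.recv() == {'status': 'ok'}
    p.join()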
self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). 
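#
# Illustrative sketch (not part of the test module): sharing a ctypes Structure
# through Value(), the pattern _Foo/_TestSharedCTypes below exercise.  `Point`
# and `shift` are illustrative names.

from ctypes import Structure, c_int, c_double
from multiprocess import Process, Value

class Point(Structure):
    _fields_ = [('x', c_int), ('y', c_double)]

def shift(point):
    point.x += 1                              # field access goes through the
    point.y += 0.5                            # wrapper's lock-protected properties

if __name__ == '__main__':
    pt = Value(Point, 1, 2.0)                 # lock-wrapped shared Point(1, 2.0)
    p = Process(target=shift, args=(pt,))
    p.start()
    p.join()
    assert pt.x == 2 and pt.y == 2.5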
# Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def _new_shm_name(self, prefix): # Add a PID to the name of a POSIX shared memory object to allow # running multiprocessing tests (test_multiprocessing_fork, # test_multiprocessing_spawn, etc) in parallel. return prefix + str(os.getpid()) def test_shared_memory_basics(self): name_tsmb = self._new_shm_name('test01_tsmb') sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, name_tsmb) self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory(name_tsmb) self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. same_sms.close() if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. 
Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. name_dblunlink = self._new_shm_name('test01_dblunlink') sms_uno = shared_memory.SharedMemory( name_dblunlink, create=True, size=5000 ) with self.assertRaises(FileNotFoundError): try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory(name_dblunlink) sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( name_tsmb, create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. sms.close() # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocess tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. 
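#
# Illustrative sketch (not part of the test module): creating and attaching to
# a raw shared_memory.SharedMemory block, as _TestSharedMemory above exercises.

from multiprocess import shared_memory

if __name__ == '__main__':
    block = shared_memory.SharedMemory(create=True, size=16)
    try:
        block.buf[:5] = b'hello'
        peer = shared_memory.SharedMemory(name=block.name)   # attach by name
        assert bytes(peer.buf[:5]) == b'hello'
        peer.close()
    finally:
        block.close()
        block.unlink()            # frees the segment (a no-op on Windows)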
smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' #XXX: ensure correct resource_tracker rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. 
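#
# Illustrative sketch (not part of the test module): SharedMemoryManager and
# ShareableList, which the surrounding _TestSharedMemory tests exercise.

from multiprocess.managers import SharedMemoryManager

if __name__ == '__main__':
    with SharedMemoryManager() as smm:
        sl = smm.ShareableList(['howdy', 42, 3.14, None, True])
        sl[1] = 100                     # slots are fixed-size; values must fit
        assert sl[1] == 100 and sl[0] == 'howdy'
        block = smm.SharedMemory(size=64)
        block.buf[:3] = b'abc'
    # leaving the with-block shuts the manager down and releases every block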
self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. name_duplicate = self._new_shm_name('test03_duplicate') sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual(name_duplicate, sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl) deserialized_sl = pickle.loads(serialized_sl) self.assertTrue( isinstance(deserialized_sl, shared_memory.ShareableList) ) self.assertTrue(deserialized_sl[-1], 9) self.assertFalse(sl is deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") # Verify data is not being put into the pickled representation. name = 'a' * len(sl.shm.name) larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl) self.assertTrue(len(serialized_sl) == len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. 
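#
# Illustrative sketch (not part of the test module): the call-once behaviour of
# util.Finalize that _TestFinalize below relies on.  `Resource` is an
# illustrative name.

from multiprocess import util

class Resource(object):
    pass

if __name__ == '__main__':
    released = []
    res = Resource()
    # the callback runs when `res` is collected, at exit, or on an explicit call
    finalizer = util.Finalize(res, released.append, args=('res',), exitpriority=0)
    finalizer()                 # explicit call invokes the callback once
    finalizer()                 # later calls are no-ops
    assert released == ['res']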
p.terminate() p.wait() deadline = getattr(time,'monotonic',time.time)() + 60 t = 0.1 while getattr(time,'monotonic',time.time)() < deadline: time.sleep(t) t = min(t*2, 5) try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: raise AssertionError("A SharedMemory segment was leaked after" " a process was abruptly terminated.") if os.name == 'posix': # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with test.support.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... 
import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' + m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): 
@unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): pass finally: # Hack private attribute _handle to avoid printing an error # in conn.__del__ conn._handle = None self.assertRaises((ValueError, OSError), multiprocessing.connection.Connection, -1) class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection.CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') # # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() self.mgr.join() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() m.join() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocessing.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random()*0.1) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random()*0.1) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.create_server((test.support.HOST, 0)) addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) def test_wait_timeout(self): from multiprocess.connection import wait expected = 5 a, b = multiprocessing.Pipe() start = getattr(time,'monotonic',time.time)() res = wait([a, b], expected) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(res, []) self.assertLess(delta, expected * 2) self.assertGreater(delta, expected * 0.5) b.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, b], 20) delta = 
getattr(time,'monotonic',time.time)() - start self.assertEqual(res, [a]) self.assertLess(delta, 0.4) @classmethod def signal_and_sleep(cls, sem, period): sem.release() time.sleep(period) def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocessing.Semaphore(0) a, b = multiprocessing.Pipe() p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) self.assertTrue(sem.acquire(timeout=20)) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], expected + 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocessing.Pipe() t = getattr(time,'monotonic',time.time)() res = wait([a], timeout=-1) t = getattr(time,'monotonic',time.time)() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls): import json r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json # start child process using unusual flags prog = ('from multiprocess.tests import TestFlags; ' + 'TestFlags.run_in_child()') data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() 
self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. @classmethod def child(cls, n, conn): if n > 1: p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() join_process(p) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocessing.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocessing.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() join_process(p) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. 
fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocessing.Pipe() fd = self.get_high_socket_fd() try: p = multiprocessing.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() join_process(p) finally: self.close(fd) writer.close() reader.close() if multiprocessing.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, ctx.get_start_method()) def test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocessing.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def test_set_get(self): multiprocessing.set_forkserver_preload(PRELOAD) count 
= 0 old_method = multiprocessing.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocessing.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocessing.get_start_method(), method) ctx = multiprocessing.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocessing) count += 1 finally: multiprocessing.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocessing.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['spawn', 'fork'] or methods == ['fork', 'spawn', 'forkserver'] or methods == ['spawn', 'fork', 'forkserver']) def test_preload_resources(self): if multiprocessing.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestResourceTracker(unittest.TestCase): def _test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os, tempfile import multiprocess as mp from multiprocess import resource_tracker from multiprocess.shared_memory import SharedMemory mp.set_start_method("spawn") rand = tempfile._RandomNameSequence() def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() deadline = getattr(time,'monotonic',time.time)() + 60 while getattr(time,'monotonic',time.time)() < deadline: time.sleep(.5) try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, but OSX seems to give # EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break else: raise AssertionError( f"A {rtype} resource was leaked after a process was " f"abruptly terminated.") err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) def check_resource_tracker_death(self, signum, should_die): # 
bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) os.waitpid(pid, 0) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: During pool creation, if we can't create a process, # don't leak already created ones. 
will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert o[1] == 6 """ manager_class = multiprocessing.managers.SyncManager def setUp(self): self.manager = self.manager_class() self.manager.start() self.proc = None def tearDown(self): if self.proc is not None and self.proc.is_alive(): self.proc.terminate() self.proc.join() self.manager.shutdown() self.manager = None self.proc = None @classmethod def setUpClass(cls): support.reap_children() tearDownClass = setUpClass def wait_proc_exit(self): # Only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395). 
join_process(self.proc) start_time = getattr(time,'monotonic',time.time)() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = getattr(time,'monotonic',time.time)() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) self.run_worker(self._test_pool, o) @classmethod def _test_queue(cls, obj): assert obj.qsize() == 2 assert obj.full() assert not obj.empty() assert obj.get() == 5 assert not obj.empty() assert obj.get() == 6 assert obj.empty() def test_queue(self, qname="Queue"): o = getattr(self.manager, qname)(2) o.put(5) o.put(6) self.run_worker(self._test_queue, o) assert o.empty() assert not o.full() def test_joinable_queue(self): self.test_queue("JoinableQueue") @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.count(5) == 1 assert obj.index(5) == 0 obj.sort() obj.reverse() for x in obj: pass assert len(obj) == 1 assert obj.pop(0) == 5 def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_dict(cls, obj): assert len(obj) == 1 assert obj['foo'] == 5 assert obj.get('foo') == 5 assert list(obj.items()) == [('foo', 5)] assert list(obj.keys()) == ['foo'] assert list(obj.values()) == [5] assert obj.copy() == {'foo': 5} assert obj.popitem() == ('foo', 5) def test_dict(self): o = self.manager.dict() o['foo'] = 5 self.run_worker(self._test_dict, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_value(cls, obj): assert obj.value == 1 assert obj.get() == 1 obj.set(2) def test_value(self): o = self.manager.Value('i', 1) self.run_worker(self._test_value, o) self.assertEqual(o.value, 2) self.assertEqual(o.get(), 2) @classmethod def _test_array(cls, obj): assert obj[0] == 0 assert obj[1] == 1 assert 
len(obj) == 2 assert list(obj) == [0, 1] def test_array(self): o = self.manager.Array('i', [0, 1]) self.run_worker(self._test_array, o) @classmethod def _test_namespace(cls, obj): assert obj.x == 0 assert obj.y == 1 def test_namespace(self): o = self.manager.Namespace() o.x = 0 o.y = 1 self.run_worker(self._test_namespace, o) class MiscTestCase(unittest.TestCase): def test__all__(self): # Just make sure names in blacklist are excluded support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, blacklist=['SUBDEBUG', 'SUBWARNING', 'license', 'citation']) # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocessing.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) if processes: test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocessing.Process connection = multiprocessing.connection current_process = staticmethod(multiprocessing.current_process) parent_process = staticmethod(multiprocessing.parent_process) active_children = staticmethod(multiprocessing.active_children) Pool = staticmethod(multiprocessing.Pool) Pipe = staticmethod(multiprocessing.Pipe) Queue = staticmethod(multiprocessing.Queue) JoinableQueue = staticmethod(multiprocessing.JoinableQueue) Lock = staticmethod(multiprocessing.Lock) RLock = staticmethod(multiprocessing.RLock) Semaphore = staticmethod(multiprocessing.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) Condition = staticmethod(multiprocessing.Condition) Event = staticmethod(multiprocessing.Event) Barrier = staticmethod(multiprocessing.Barrier) Value = staticmethod(multiprocessing.Value) Array = staticmethod(multiprocessing.Array) RawValue = staticmethod(multiprocessing.RawValue) RawArray = staticmethod(multiprocessing.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocessing.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocessing.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this 
can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) start_time = getattr(time,'monotonic',time.time)() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = getattr(time,'monotonic',time.time)() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. test.support.environment_altered = True support.print_warning('Shared objects which still exist ' 'at manager shutdown:') support.print_warning(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocessing.dummy.Process connection = multiprocessing.dummy.connection current_process = staticmethod(multiprocessing.dummy.current_process) active_children = staticmethod(multiprocessing.dummy.active_children) Pool = staticmethod(multiprocessing.dummy.Pool) Pipe = staticmethod(multiprocessing.dummy.Pipe) Queue = staticmethod(multiprocessing.dummy.Queue) JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) Lock = staticmethod(multiprocessing.dummy.Lock) RLock = staticmethod(multiprocessing.dummy.RLock) Semaphore = staticmethod(multiprocessing.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) Condition = staticmethod(multiprocessing.dummy.Condition) Event = staticmethod(multiprocessing.dummy.Event) Barrier = staticmethod(multiprocessing.dummy.Barrier) Value = staticmethod(multiprocessing.dummy.Value) Array = staticmethod(multiprocessing.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = __module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocessing.set_forkserver_preload(PRELOAD) multiprocessing.process._cleanup() dangling[0] = multiprocessing.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " "see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates 
temp directory multiprocessing.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = False # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() multiprocessing.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocessing.process._dangling) - set(dangling[0]) if processes: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocessing.util._cleanup_tests() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/tests/__main__.py000066400000000000000000000016201455552142400261650ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') tests = glob.glob(suite + os.path.sep + '__init__.py') + \ [i for i in tests if 'main' not in i] if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/tests/mp_fork_bomb.py000066400000000000000000000007001455552142400270770ustar00rootroot00000000000000import multiprocessing, sys def foo(): print("123") # Because "if __name__ == '__main__'" is missing this will not work # correctly on Windows. However, we should get a RuntimeError rather # than the Windows equivalent of a fork bomb. 
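# For contrast, a guarded version of this script (a minimal sketch, mirroring
# the code below) would protect process creation behind the __main__ guard so
# that 'spawn' and 'forkserver' children can safely re-import the module:
#
#     if __name__ == '__main__':
#         multiprocessing.set_start_method('spawn')
#         p = multiprocessing.Process(target=foo)
#         p.start()
#         p.join()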
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) else: multiprocessing.set_start_method('spawn') p = multiprocessing.Process(target=foo) p.start() p.join() sys.exit(p.exitcode) uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/tests/mp_preload.py000066400000000000000000000005551455552142400265750ustar00rootroot00000000000000import multiprocessing multiprocessing.Lock() def f(): print("ok") if __name__ == "__main__": ctx = multiprocessing.get_context("forkserver") modname = "multiprocess.tests.mp_preload" # Make sure it's importable __import__(modname) ctx.set_forkserver_preload([modname]) proc = ctx.Process(target=f) proc.start() proc.join() uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/tests/test_multiprocessing_fork.py000066400000000000000000000007341455552142400317610ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("fork is not available on Windows") if sys.platform == 'darwin': raise unittest.SkipTest("test may crash on macOS (bpo-33725)") install_tests_in_module_dict(globals(), 'fork') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/tests/test_multiprocessing_forkserver.py000066400000000000000000000006071455552142400332070ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("forkserver is not available on Windows") install_tests_in_module_dict(globals(), 'forkserver') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/tests/test_multiprocessing_main_handling.py000066400000000000000000000271601455552142400336120ustar00rootroot00000000000000# tests __main__ module handling in multiprocessing from test import support # Skip tests if _multiprocessing wasn't built. support.import_module('_multiprocessing') import importlib import importlib.machinery import unittest import sys import os import os.path import py_compile from test.support.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, assert_python_ok) if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") # Look up which start methods are available to test import multiprocess as multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) # Issue #22332: Skip tests if sem_open implementation is broken. support.import_module('multiprocess.synchronize') verbose = support.verbose test_source = """\ # multiprocessing includes all sorts of shenanigans to make __main__ # attributes accessible in the subprocess in a pickle compatible way. 
# We run the "doesn't work in the interactive interpreter" example from # the docs to make sure it *does* work from an executed __main__, # regardless of the invocation mechanism import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method # We use this __main__ defined function in the map call below in order to # check that multiprocessing in correctly running the unguarded # code in child processes and then making it available as __main__ def f(x): return x*x # Check explicit relative imports if "check_sibling" in __file__: # We're inside a package and not in a __main__.py file # so make sure explicit relative imports work correctly from . import sibling if __name__ == '__main__': start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(f, [1, 2, 3], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) test_source_main_skipped_in_children = """\ # __main__.py files have an implied "if __name__ == '__main__'" so # multiprocessing should always skip running them in child processes # This means we can't use __main__ defined functions in child processes, # so we just use "int" as a passthrough operation below if __name__ != "__main__": raise RuntimeError("Should only be called as __main__!") import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(int, [1, 4, 9], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) # These helpers were copied from test_cmd_line_script & tweaked a bit... def _make_test_script(script_dir, script_basename, source=test_source, omit_suffix=False): to_return = make_script(script_dir, script_basename, source, omit_suffix) # Hack to check explicit relative imports if script_basename == "check_sibling": make_script(script_dir, "sibling", "") importlib.invalidate_caches() return to_return def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source=test_source, depth=1): to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth) importlib.invalidate_caches() return to_return # There's no easy way to pass the script directory in to get # -m to work (avoiding that is the whole point of making # directories and zipfiles executable!) 
# So we fake it for testing purposes with a custom launch script launch_source = """\ import sys, os.path, runpy sys.path.insert(0, %s) runpy._run_module_as_main(%r) """ def _make_launch_script(script_dir, script_basename, module_name, path=None): if path is None: path = "os.path.dirname(__file__)" else: path = repr(path) source = launch_source % (path, module_name) to_return = make_script(script_dir, script_basename, source) importlib.invalidate_caches() return to_return class MultiProcessingCmdLineMixin(): maxDiff = None # Show full tracebacks on subprocess failure def setUp(self): if self.start_method not in AVAILABLE_START_METHODS: self.skipTest("%r start method not available" % self.start_method) def _check_output(self, script_name, exit_code, out, err): if verbose > 1: print("Output from test script %r:" % script_name) print(repr(out)) self.assertEqual(exit_code, 0) self.assertEqual(err.decode('utf-8'), '') expected_results = "%s -> [1, 4, 9]" % self.start_method self.assertEqual(out.decode('utf-8').strip(), expected_results) def _check_script(self, script_name, *cmd_line_switches): if not __debug__: cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) run_args = cmd_line_switches + (script_name, self.start_method) rc, out, err = assert_python_ok(*run_args, __isolated=False) self._check_output(script_name, rc, out, err) def test_basic_script(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') self._check_script(script_name) def test_basic_script_no_suffix(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script', omit_suffix=True) self._check_script(script_name) def test_ipython_workaround(self): # Some versions of the IPython launch script are missing the # __name__ = "__main__" guard, and multiprocessing has long had # a workaround for that case # See https://github.com/ipython/ipython/issues/4698 source = test_source_main_skipped_in_children with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'ipython', source=source) self._check_script(script_name) script_no_suffix = _make_test_script(script_dir, 'ipython', source=source, omit_suffix=True) self._check_script(script_no_suffix) def test_script_compiled(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) self._check_script(pyc_file) def test_directory(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) self._check_script(script_dir) def test_directory_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) self._check_script(script_dir) def test_zipfile(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) self._check_script(zip_name) def test_zipfile_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) compiled_name = 
py_compile.compile(script_name, doraise=True) zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) self._check_script(zip_name) def test_module_in_package(self): with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, 'check_sibling') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.check_sibling') self._check_script(launch_name) def test_module_in_package_in_zipfile(self): with support.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) self._check_script(launch_name) def test_module_in_subpackage_in_zipfile(self): with support.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) self._check_script(launch_name) def test_package(self): source = self.main_in_children_source with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) def test_package_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) compiled_name = py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) # Test all supported start methods (setupClass skips as appropriate) class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'spawn' main_in_children_source = test_source_main_skipped_in_children class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'fork' main_in_children_source = test_source class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'forkserver' main_in_children_source = test_source_main_skipped_in_children def tearDownModule(): support.reap_children() if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/tests/test_multiprocessing_spawn.py000066400000000000000000000004241455552142400321440ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") install_tests_in_module_dict(globals(), 'spawn') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.8/multiprocess/util.py000066400000000000000000000331621455552142400242660ustar00rootroot00000000000000# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import itertools import sys import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocess' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocess ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # Abstract socket support def _platform_supports_abstract_sockets(): if sys.platform == "linux": return True if hasattr(sys, 'getandroidapilevel'): return True return False def is_abstract_socket_namespace(address): if not address: return False if isinstance(address, bytes): return address[0] == 0 elif isinstance(address, str): return address[0] == "\0" raise TypeError('address type of {address!r} unrecognized') abstract_sockets_supported = _platform_supports_abstract_sockets() # # Function returning a temp directory which will be removed on exit # def _remove_temp_dir(rmtree, tempdir): rmtree(tempdir) current_process = process.current_process() # current_process() can be None if the finalizer is called # late during Python finalization if current_process is not None: current_process._config['tempdir'] = None def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up tempdir = process.current_process()._config.get('tempdir') if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) # keep a strong reference to shutil.rmtree(), since the finalizer # can be called late during Python shutdown Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), exitpriority=-100) process.current_process()._config['tempdir'] = tempdir return tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} 
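# _finalizer_registry maps (exitpriority, creation-order) keys to Finalize
# instances; _run_finalizers() below calls them highest priority first.
# A minimal usage sketch (mirroring the _TestFinalize tests in this package):
#
#     class Foo(object):
#         pass
#     a = Foo()
#     Finalize(a, print, args=('a was finalized',))
#     del a                  # the weakref dies and the callback runs
#
#     b = Foo()
#     close_b = Finalize(b, print, args=('b was finalized',))
#     close_b()              # may also be called directly; later calls
#     close_b()              # and garbage collection are then no-ops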
_finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): if (exitpriority is not None) and not isinstance(exitpriority,int): raise TypeError( "Exitpriority ({0!r}) must be None or int, not {1!s}".format( exitpriority, type(exitpriority))) if obj is not None: self._weakref = weakref.ref(obj, self) elif exitpriority is None: raise ValueError("Without object, exitpriority cannot be None") self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<%s object, dead>' % self.__class__.__name__ x = '<%s object, callback=%s' % ( self.__class__.__name__, getattr(self._callback, '__name__', self._callback)) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitpriority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0] is not None else: f = lambda p : p[0] is not None and p[0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). # list(_finalizer_registry) should be atomic, while # list(_finalizer_registry.items()) is not. 
keys = [key for key in list(_finalizer_registry) if f(key)] keys.sort(reverse=True) for key in keys: finalizer = _finalizer_registry.get(key) # key may have been removed from the registry if finalizer is not None: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=process.active_children, current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._reset() register_after_fork(self, ForkAwareThreadLock._reset) def _reset(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () # # Close fds except those specified # try: MAXFD = os.sysconf("SC_OPEN_MAX") except Exception: MAXFD = 256 def close_all_fds_except(fds): fds = list(fds) + [-1, MAXFD] fds.sort() assert fds[-1] == MAXFD, 'fd too large' for i in range(len(fds) - 1): os.closerange(fds[i]+1, fds[i+1]) # # Close sys.stdin and replace stdin with os.devnull # def _close_stdin(): if sys.stdin is None: return try: sys.stdin.close() except (OSError, ValueError): pass try: fd = os.open(os.devnull, os.O_RDONLY) try: sys.stdin = open(fd, closefd=False) except: os.close(fd) raise except (OSError, ValueError): pass # # Flush standard streams, if any # def _flush_std_streams(): try: sys.stdout.flush() except (AttributeError, ValueError): pass try: sys.stderr.flush() except (AttributeError, ValueError): pass # # Start a program with only specified fds kept open # def spawnv_passfds(path, args, passfds): import _posixsubprocess passfds = tuple(sorted(map(int, passfds))) errpipe_read, errpipe_write = os.pipe() try: return _posixsubprocess.fork_exec( args, [os.fsencode(path)], 
True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, None) finally: os.close(errpipe_read) os.close(errpipe_write) def close_fds(*fds): """Close each file descriptor given as an argument""" for fd in fds: os.close(fd) def _cleanup_tests(): """Cleanup multiprocessing resources when multiprocessing tests completed.""" from test import support # cleanup multiprocessing process._cleanup() # Stop the ForkServer process if it's running from multiprocess import forkserver forkserver._forkserver._stop() # Stop the ResourceTracker process if it's running from multiprocess import resource_tracker resource_tracker._resource_tracker._stop() # bpo-37421: Explicitly call _run_finalizers() to remove immediately # temporary directories created by multiprocessing.util.get_temp_dir(). _run_finalizers() support.gc_collect() support.reap_children() uqfoundation-multiprocess-b3457a5/py3.9/000077500000000000000000000000001455552142400202025ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/Modules/000077500000000000000000000000001455552142400216125ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/Modules/_multiprocess/000077500000000000000000000000001455552142400245025ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/Modules/_multiprocess/clinic/000077500000000000000000000000001455552142400257435ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/Modules/_multiprocess/clinic/posixshmem.c.h000066400000000000000000000077751455552142400305510ustar00rootroot00000000000000/*[clinic input] preserve [clinic start generated code]*/ #if defined(HAVE_SHM_OPEN) PyDoc_STRVAR(_posixshmem_shm_open__doc__, "shm_open($module, /, path, flags, mode=511)\n" "--\n" "\n" "Open a shared memory object. Returns a file descriptor (integer)."); #define _POSIXSHMEM_SHM_OPEN_METHODDEF \ {"shm_open", (PyCFunction)(void(*)(void))_posixshmem_shm_open, METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_open__doc__}, static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode); static PyObject * _posixshmem_shm_open(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; static const char * const _keywords[] = {"path", "flags", "mode", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "shm_open", 0}; PyObject *argsbuf[3]; Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 2; PyObject *path; int flags; int mode = 511; int _return_value; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 2, 3, 0, argsbuf); if (!args) { goto exit; } if (!PyUnicode_Check(args[0])) { _PyArg_BadArgument("shm_open", "argument 'path'", "str", args[0]); goto exit; } if (PyUnicode_READY(args[0]) == -1) { goto exit; } path = args[0]; if (PyFloat_Check(args[1])) { PyErr_SetString(PyExc_TypeError, "integer argument expected, got float" ); goto exit; } flags = _PyLong_AsInt(args[1]); if (flags == -1 && PyErr_Occurred()) { goto exit; } if (!noptargs) { goto skip_optional_pos; } if (PyFloat_Check(args[2])) { PyErr_SetString(PyExc_TypeError, "integer argument expected, got float" ); goto exit; } mode = _PyLong_AsInt(args[2]); if (mode == -1 && PyErr_Occurred()) { goto exit; } skip_optional_pos: _return_value = _posixshmem_shm_open_impl(module, path, flags, mode); if ((_return_value == -1) && PyErr_Occurred()) { goto exit; } return_value = PyLong_FromLong((long)_return_value); exit: return return_value; } #endif /* defined(HAVE_SHM_OPEN) */ #if defined(HAVE_SHM_UNLINK) PyDoc_STRVAR(_posixshmem_shm_unlink__doc__, "shm_unlink($module, /, path)\n" "--\n" "\n" "Remove a shared memory object (similar to unlink()).\n" "\n" "Remove a shared memory object name, and, once all processes have unmapped\n" "the object, de-allocates and destroys the contents of the associated memory\n" "region."); #define _POSIXSHMEM_SHM_UNLINK_METHODDEF \ {"shm_unlink", (PyCFunction)(void(*)(void))_posixshmem_shm_unlink, METH_FASTCALL|METH_KEYWORDS, _posixshmem_shm_unlink__doc__}, static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path); static PyObject * _posixshmem_shm_unlink(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; static const char * const _keywords[] = {"path", NULL}; static _PyArg_Parser _parser = {NULL, _keywords, "shm_unlink", 0}; PyObject *argsbuf[1]; PyObject *path; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf); if (!args) { goto exit; } if (!PyUnicode_Check(args[0])) { _PyArg_BadArgument("shm_unlink", "argument 'path'", "str", args[0]); goto exit; } if (PyUnicode_READY(args[0]) == -1) { goto exit; } path = args[0]; return_value = _posixshmem_shm_unlink_impl(module, path); exit: return return_value; } #endif /* defined(HAVE_SHM_UNLINK) */ #ifndef _POSIXSHMEM_SHM_OPEN_METHODDEF #define _POSIXSHMEM_SHM_OPEN_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_OPEN_METHODDEF) */ #ifndef _POSIXSHMEM_SHM_UNLINK_METHODDEF #define _POSIXSHMEM_SHM_UNLINK_METHODDEF #endif /* !defined(_POSIXSHMEM_SHM_UNLINK_METHODDEF) */ /*[clinic end generated code: output=9132861c61d8c2d8 input=a9049054013a1b77]*/ uqfoundation-multiprocess-b3457a5/py3.9/Modules/_multiprocess/multiprocess.c000066400000000000000000000125431455552142400274040ustar00rootroot00000000000000/* * Extension module used by multiprocessing package * * multiprocessing.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. 
*/ #include "multiprocess.h" /* * Function which raises exceptions based on error codes */ PyObject * _PyMp_SetError(PyObject *Type, int num) { switch (num) { #ifdef MS_WINDOWS case MP_STANDARD_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, 0); break; case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, WSAGetLastError()); break; #else /* !MS_WINDOWS */ case MP_STANDARD_ERROR: case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetFromErrno(Type); break; #endif /* !MS_WINDOWS */ case MP_MEMORY_ERROR: PyErr_NoMemory(); break; case MP_EXCEPTION_HAS_BEEN_SET: break; default: PyErr_Format(PyExc_RuntimeError, "unknown error number %d", num); } return NULL; } #ifdef MS_WINDOWS static PyObject * multiprocessing_closesocket(PyObject *self, PyObject *args) { HANDLE handle; int ret; if (!PyArg_ParseTuple(args, F_HANDLE ":closesocket" , &handle)) return NULL; Py_BEGIN_ALLOW_THREADS ret = closesocket((SOCKET) handle); Py_END_ALLOW_THREADS if (ret) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); Py_RETURN_NONE; } static PyObject * multiprocessing_recv(PyObject *self, PyObject *args) { HANDLE handle; int size, nread; PyObject *buf; if (!PyArg_ParseTuple(args, F_HANDLE "i:recv" , &handle, &size)) return NULL; buf = PyBytes_FromStringAndSize(NULL, size); if (!buf) return NULL; Py_BEGIN_ALLOW_THREADS nread = recv((SOCKET) handle, PyBytes_AS_STRING(buf), size, 0); Py_END_ALLOW_THREADS if (nread < 0) { Py_DECREF(buf); return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); } _PyBytes_Resize(&buf, nread); return buf; } static PyObject * multiprocessing_send(PyObject *self, PyObject *args) { HANDLE handle; Py_buffer buf; int ret, length; if (!PyArg_ParseTuple(args, F_HANDLE "y*:send" , &handle, &buf)) return NULL; length = (int)Py_MIN(buf.len, INT_MAX); Py_BEGIN_ALLOW_THREADS ret = send((SOCKET) handle, buf.buf, length, 0); Py_END_ALLOW_THREADS PyBuffer_Release(&buf); if (ret < 0) return PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError()); return PyLong_FromLong(ret); } #endif /* * Function table */ static PyMethodDef module_methods[] = { #ifdef MS_WINDOWS {"closesocket", multiprocessing_closesocket, METH_VARARGS, ""}, {"recv", multiprocessing_recv, METH_VARARGS, ""}, {"send", multiprocessing_send, METH_VARARGS, ""}, #endif #if !defined(POSIX_SEMAPHORES_NOT_ENABLED) && !defined(__ANDROID__) {"sem_unlink", _PyMp_sem_unlink, METH_VARARGS, ""}, #endif {NULL} }; /* * Initialize */ static struct PyModuleDef multiprocessing_module = { PyModuleDef_HEAD_INIT, "_multiprocess", NULL, -1, module_methods, NULL, NULL, NULL, NULL }; PyMODINIT_FUNC PyInit__multiprocess(void) { PyObject *module, *temp, *value = NULL; /* Initialize module */ module = PyModule_Create(&multiprocessing_module); if (!module) return NULL; #if defined(MS_WINDOWS) || \ (defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED)) /* Add _PyMp_SemLock type to module */ if (PyType_Ready(&_PyMp_SemLockType) < 0) return NULL; Py_INCREF(&_PyMp_SemLockType); { PyObject *py_sem_value_max; /* Some systems define SEM_VALUE_MAX as an unsigned value that * causes it to be negative when used as an int (NetBSD). * * Issue #28152: Use (0) instead of 0 to fix a warning on dead code * when using clang -Wunreachable-code. 
*/ if ((int)(SEM_VALUE_MAX) < (0)) py_sem_value_max = PyLong_FromLong(INT_MAX); else py_sem_value_max = PyLong_FromLong(SEM_VALUE_MAX); if (py_sem_value_max == NULL) return NULL; PyDict_SetItemString(_PyMp_SemLockType.tp_dict, "SEM_VALUE_MAX", py_sem_value_max); } PyModule_AddObject(module, "SemLock", (PyObject*)&_PyMp_SemLockType); #endif /* Add configuration macros */ temp = PyDict_New(); if (!temp) return NULL; #define ADD_FLAG(name) \ value = Py_BuildValue("i", name); \ if (value == NULL) { Py_DECREF(temp); return NULL; } \ if (PyDict_SetItemString(temp, #name, value) < 0) { \ Py_DECREF(temp); Py_DECREF(value); return NULL; } \ Py_DECREF(value) #if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) ADD_FLAG(HAVE_SEM_OPEN); #endif #ifdef HAVE_SEM_TIMEDWAIT ADD_FLAG(HAVE_SEM_TIMEDWAIT); #endif #ifdef HAVE_BROKEN_SEM_GETVALUE ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE); #endif #ifdef HAVE_BROKEN_SEM_UNLINK ADD_FLAG(HAVE_BROKEN_SEM_UNLINK); #endif if (PyModule_AddObject(module, "flags", temp) < 0) return NULL; return module; } uqfoundation-multiprocess-b3457a5/py3.9/Modules/_multiprocess/multiprocess.h000066400000000000000000000041561455552142400274120ustar00rootroot00000000000000#ifndef MULTIPROCESS_H #define MULTIPROCESS_H #define PY_SSIZE_T_CLEAN #include "Python.h" #include "structmember.h" #include "pythread.h" /* * Platform includes and definitions */ #ifdef MS_WINDOWS # define WIN32_LEAN_AND_MEAN # include # include # include /* getpid() */ # ifdef Py_DEBUG # include # endif # define SEM_HANDLE HANDLE # define SEM_VALUE_MAX LONG_MAX #else # include /* O_CREAT and O_EXCL */ # if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) # include typedef sem_t *SEM_HANDLE; # endif #endif /* * Issue 3110 - Solaris does not define SEM_VALUE_MAX */ #ifndef SEM_VALUE_MAX #if defined(HAVE_SYSCONF) && defined(_SC_SEM_VALUE_MAX) # define SEM_VALUE_MAX sysconf(_SC_SEM_VALUE_MAX) #elif defined(_SEM_VALUE_MAX) # define SEM_VALUE_MAX _SEM_VALUE_MAX #elif defined(_POSIX_SEM_VALUE_MAX) # define SEM_VALUE_MAX _POSIX_SEM_VALUE_MAX #else # define SEM_VALUE_MAX INT_MAX #endif #endif /* * Format codes */ #if SIZEOF_VOID_P == SIZEOF_LONG # define F_POINTER "k" # define T_POINTER T_ULONG #elif SIZEOF_VOID_P == SIZEOF_LONG_LONG # define F_POINTER "K" # define T_POINTER T_ULONGLONG #else # error "can't find format code for unsigned integer of same size as void*" #endif #ifdef MS_WINDOWS # define F_HANDLE F_POINTER # define T_HANDLE T_POINTER # define F_SEM_HANDLE F_HANDLE # define T_SEM_HANDLE T_HANDLE #else # define F_HANDLE "i" # define T_HANDLE T_INT # define F_SEM_HANDLE F_POINTER # define T_SEM_HANDLE T_POINTER #endif /* * Error codes which can be returned by functions called without GIL */ #define MP_SUCCESS (0) #define MP_STANDARD_ERROR (-1) #define MP_MEMORY_ERROR (-1001) #define MP_SOCKET_ERROR (-1002) #define MP_EXCEPTION_HAS_BEEN_SET (-1003) PyObject *_PyMp_SetError(PyObject *Type, int num); /* * Externs - not all will really exist on all platforms */ extern PyTypeObject _PyMp_SemLockType; extern PyObject *_PyMp_sem_unlink(PyObject *ignore, PyObject *args); #endif /* MULTIPROCESS_H */ uqfoundation-multiprocess-b3457a5/py3.9/Modules/_multiprocess/posixshmem.c000066400000000000000000000057551455552142400270560ustar00rootroot00000000000000/* posixshmem - A Python extension that provides shm_open() and shm_unlink() */ #define PY_SSIZE_T_CLEAN #include // for shm_open() and shm_unlink() #ifdef HAVE_SYS_MMAN_H #include #endif /*[clinic input] module _posixshmem [clinic start generated 
code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=a416734e49164bf8]*/ /* * * Module-level functions & meta stuff * */ #ifdef HAVE_SHM_OPEN /*[clinic input] _posixshmem.shm_open -> int path: unicode flags: int mode: int = 0o777 # "shm_open(path, flags, mode=0o777)\n\n\ Open a shared memory object. Returns a file descriptor (integer). [clinic start generated code]*/ static int _posixshmem_shm_open_impl(PyObject *module, PyObject *path, int flags, int mode) /*[clinic end generated code: output=8d110171a4fa20df input=e83b58fa802fac25]*/ { int fd; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return -1; } do { Py_BEGIN_ALLOW_THREADS fd = shm_open(name, flags, mode); Py_END_ALLOW_THREADS } while (fd < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (fd < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return -1; } return fd; } #endif /* HAVE_SHM_OPEN */ #ifdef HAVE_SHM_UNLINK /*[clinic input] _posixshmem.shm_unlink path: unicode Remove a shared memory object (similar to unlink()). Remove a shared memory object name, and, once all processes have unmapped the object, de-allocates and destroys the contents of the associated memory region. [clinic start generated code]*/ static PyObject * _posixshmem_shm_unlink_impl(PyObject *module, PyObject *path) /*[clinic end generated code: output=42f8b23d134b9ff5 input=8dc0f87143e3b300]*/ { int rv; int async_err = 0; const char *name = PyUnicode_AsUTF8(path); if (name == NULL) { return NULL; } do { Py_BEGIN_ALLOW_THREADS rv = shm_unlink(name); Py_END_ALLOW_THREADS } while (rv < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); if (rv < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path); return NULL; } Py_RETURN_NONE; } #endif /* HAVE_SHM_UNLINK */ #include "clinic/posixshmem.c.h" static PyMethodDef module_methods[ ] = { _POSIXSHMEM_SHM_OPEN_METHODDEF _POSIXSHMEM_SHM_UNLINK_METHODDEF {NULL} /* Sentinel */ }; static struct PyModuleDef this_module = { PyModuleDef_HEAD_INIT, // m_base "_posixshmem", // m_name "POSIX shared memory module", // m_doc -1, // m_size (space allocated for module globals) module_methods, // m_methods }; /* Module init function */ PyMODINIT_FUNC PyInit__posixshmem(void) { PyObject *module; module = PyModule_Create(&this_module); if (!module) { return NULL; } return module; } uqfoundation-multiprocess-b3457a5/py3.9/Modules/_multiprocess/semaphore.c000066400000000000000000000453171455552142400266430ustar00rootroot00000000000000/* * A type which wraps a semaphore * * semaphore.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocess.h" enum { RECURSIVE_MUTEX, SEMAPHORE }; typedef struct { PyObject_HEAD SEM_HANDLE handle; unsigned long last_tid; int count; int maxvalue; int kind; char *name; } SemLockObject; #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) #ifdef MS_WINDOWS /* * Windows definitions */ #define SEM_FAILED NULL #define SEM_CLEAR_ERROR() SetLastError(0) #define SEM_GET_LAST_ERROR() GetLastError() #define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL) #define SEM_CLOSE(sem) (CloseHandle(sem) ? 
0 : -1) #define SEM_GETVALUE(sem, pval) _GetSemaphoreValue(sem, pval) #define SEM_UNLINK(name) 0 static int _GetSemaphoreValue(HANDLE handle, long *value) { long previous; switch (WaitForSingleObjectEx(handle, 0, FALSE)) { case WAIT_OBJECT_0: if (!ReleaseSemaphore(handle, 1, &previous)) return MP_STANDARD_ERROR; *value = previous + 1; return 0; case WAIT_TIMEOUT: *value = 0; return 0; default: return MP_STANDARD_ERROR; } } static PyObject * semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds) { int blocking = 1; double timeout; PyObject *timeout_obj = Py_None; DWORD res, full_msecs, nhandles; HANDLE handles[2], sigint_event; static char *kwlist[] = {"block", "timeout", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist, &blocking, &timeout_obj)) return NULL; /* calculate timeout */ if (!blocking) { full_msecs = 0; } else if (timeout_obj == Py_None) { full_msecs = INFINITE; } else { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; timeout *= 1000.0; /* convert to millisecs */ if (timeout < 0.0) { timeout = 0.0; } else if (timeout >= 0.5 * INFINITE) { /* 25 days */ PyErr_SetString(PyExc_OverflowError, "timeout is too large"); return NULL; } full_msecs = (DWORD)(timeout + 0.5); } /* check whether we already own the lock */ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } /* check whether we can acquire without releasing the GIL and blocking */ if (WaitForSingleObjectEx(self->handle, 0, FALSE) == WAIT_OBJECT_0) { self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; } /* prepare list of handles */ nhandles = 0; handles[nhandles++] = self->handle; if (_PyOS_IsMainThread()) { sigint_event = _PyOS_SigintEvent(); assert(sigint_event != NULL); handles[nhandles++] = sigint_event; } else { sigint_event = NULL; } /* do the wait */ Py_BEGIN_ALLOW_THREADS if (sigint_event != NULL) ResetEvent(sigint_event); res = WaitForMultipleObjectsEx(nhandles, handles, FALSE, full_msecs, FALSE); Py_END_ALLOW_THREADS /* handle result */ switch (res) { case WAIT_TIMEOUT: Py_RETURN_FALSE; case WAIT_OBJECT_0 + 0: self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; case WAIT_OBJECT_0 + 1: errno = EINTR; return PyErr_SetFromErrno(PyExc_OSError); case WAIT_FAILED: return PyErr_SetFromWindowsErr(0); default: PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or " "WaitForMultipleObjects() gave unrecognized " "value %u", res); return NULL; } } static PyObject * semlock_release(SemLockObject *self, PyObject *args) { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } if (!ReleaseSemaphore(self->handle, 1, NULL)) { if (GetLastError() == ERROR_TOO_MANY_POSTS) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } else { return PyErr_SetFromWindowsErr(0); } } --self->count; Py_RETURN_NONE; } #else /* !MS_WINDOWS */ /* * Unix definitions */ #define SEM_CLEAR_ERROR() #define SEM_GET_LAST_ERROR() 0 #define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val) #define SEM_CLOSE(sem) sem_close(sem) #define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval) #define SEM_UNLINK(name) sem_unlink(name) /* OS X 10.4 defines SEM_FAILED as -1 instead of (sem_t *)-1; this gives compiler warnings, and (potentially) undefined behaviour. 
*/ #ifdef __APPLE__ # undef SEM_FAILED # define SEM_FAILED ((sem_t *)-1) #endif #ifndef HAVE_SEM_UNLINK # define sem_unlink(name) 0 #endif // ifndef HAVE_SEM_TIMEDWAIT # define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save) static int sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) { int res; unsigned long delay, difference; struct timeval now, tvdeadline, tvdelay; errno = 0; tvdeadline.tv_sec = deadline->tv_sec; tvdeadline.tv_usec = deadline->tv_nsec / 1000; for (delay = 0 ; ; delay += 1000) { /* poll */ if (sem_trywait(sem) == 0) return 0; else if (errno != EAGAIN) return MP_STANDARD_ERROR; /* get current time */ if (gettimeofday(&now, NULL) < 0) return MP_STANDARD_ERROR; /* check for timeout */ if (tvdeadline.tv_sec < now.tv_sec || (tvdeadline.tv_sec == now.tv_sec && tvdeadline.tv_usec <= now.tv_usec)) { errno = ETIMEDOUT; return MP_STANDARD_ERROR; } /* calculate how much time is left */ difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 + (tvdeadline.tv_usec - now.tv_usec); /* check delay not too long -- maximum is 20 msecs */ if (delay > 20000) delay = 20000; if (delay > difference) delay = difference; /* sleep */ tvdelay.tv_sec = delay / 1000000; tvdelay.tv_usec = delay % 1000000; if (select(0, NULL, NULL, NULL, &tvdelay) < 0) return MP_STANDARD_ERROR; /* check for signals */ Py_BLOCK_THREADS res = PyErr_CheckSignals(); Py_UNBLOCK_THREADS if (res) { errno = EINTR; return MP_EXCEPTION_HAS_BEEN_SET; } } } // endif /* !HAVE_SEM_TIMEDWAIT */ static PyObject * semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds) { int blocking = 1, res, err = 0; PyObject *timeout_obj = Py_None; struct timespec deadline = {0}; static char *kwlist[] = {"block", "timeout", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist, &blocking, &timeout_obj)) return NULL; if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } int use_deadline = (timeout_obj != Py_None); if (use_deadline) { double timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) { return NULL; } if (timeout < 0.0) { timeout = 0.0; } struct timeval now; if (gettimeofday(&now, NULL) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } long sec = (long) timeout; long nsec = (long) (1e9 * (timeout - sec) + 0.5); deadline.tv_sec = now.tv_sec + sec; deadline.tv_nsec = now.tv_usec * 1000 + nsec; deadline.tv_sec += (deadline.tv_nsec / 1000000000); deadline.tv_nsec %= 1000000000; } /* Check whether we can acquire without releasing the GIL and blocking */ do { res = sem_trywait(self->handle); err = errno; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); errno = err; if (res < 0 && errno == EAGAIN && blocking) { /* Couldn't acquire immediately, need to block */ do { Py_BEGIN_ALLOW_THREADS if (!use_deadline) { res = sem_wait(self->handle); } else { res = sem_timedwait(self->handle, &deadline); } Py_END_ALLOW_THREADS err = errno; if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); } if (res < 0) { errno = err; if (errno == EAGAIN || errno == ETIMEDOUT) Py_RETURN_FALSE; else if (errno == EINTR) return NULL; else return PyErr_SetFromErrno(PyExc_OSError); } ++self->count; self->last_tid = PyThread_get_thread_ident(); Py_RETURN_TRUE; } static PyObject * semlock_release(SemLockObject *self, PyObject *args) { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); 
return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } else { #ifdef HAVE_BROKEN_SEM_GETVALUE /* We will only check properly the maxvalue == 1 case */ if (self->maxvalue == 1) { /* make sure that already locked */ if (sem_trywait(self->handle) < 0) { if (errno != EAGAIN) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } /* it is already locked as expected */ } else { /* it was not locked so undo wait and raise */ if (sem_post(self->handle) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } PyErr_SetString(PyExc_ValueError, "semaphore " "or lock released too many " "times"); return NULL; } } #else int sval; /* This check is not an absolute guarantee that the semaphore does not rise above maxvalue. */ if (sem_getvalue(self->handle, &sval) < 0) { return PyErr_SetFromErrno(PyExc_OSError); } else if (sval >= self->maxvalue) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } #endif } if (sem_post(self->handle) < 0) return PyErr_SetFromErrno(PyExc_OSError); --self->count; Py_RETURN_NONE; } #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject * newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, char *name) { SemLockObject *self; self = PyObject_New(SemLockObject, type); if (!self) return NULL; self->handle = handle; self->kind = kind; self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; return (PyObject*)self; } static PyObject * semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { SEM_HANDLE handle = SEM_FAILED; int kind, maxvalue, value, unlink; PyObject *result; char *name, *name_copy = NULL; static char *kwlist[] = {"kind", "value", "maxvalue", "name", "unlink", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiisi", kwlist, &kind, &value, &maxvalue, &name, &unlink)) return NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { PyErr_SetString(PyExc_ValueError, "unrecognized kind"); return NULL; } if (!unlink) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) { return PyErr_NoMemory(); } strcpy(name_copy, name); } SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; if (unlink && SEM_UNLINK(name) < 0) goto failure; result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; return result; failure: if (handle != SEM_FAILED) SEM_CLOSE(handle); PyMem_Free(name_copy); if (!PyErr_Occurred()) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); } return NULL; } static PyObject * semlock_rebuild(PyTypeObject *type, PyObject *args) { SEM_HANDLE handle; int kind, maxvalue; char *name, *name_copy = NULL; if (!PyArg_ParseTuple(args, F_SEM_HANDLE "iiz", &handle, &kind, &maxvalue, &name)) return NULL; if (name != NULL) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) return PyErr_NoMemory(); strcpy(name_copy, name); } #ifndef MS_WINDOWS if (name != NULL) { handle = sem_open(name, 0); if (handle == SEM_FAILED) { PyMem_Free(name_copy); return PyErr_SetFromErrno(PyExc_OSError); } } #endif return newsemlockobject(type, handle, kind, maxvalue, name_copy); } static void semlock_dealloc(SemLockObject* self) { if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); PyMem_Free(self->name); PyObject_Del(self); } static PyObject * semlock_count(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { return 
PyLong_FromLong((long)self->count); } static PyObject * semlock_ismine(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { /* only makes sense for a lock */ return PyBool_FromLong(ISMINE(self)); } static PyObject * semlock_getvalue(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { #ifdef HAVE_BROKEN_SEM_GETVALUE PyErr_SetNone(PyExc_NotImplementedError); return NULL; #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); /* some posix implementations use negative numbers to indicate the number of waiting threads */ if (sval < 0) sval = 0; return PyLong_FromLong((long)sval); #endif } static PyObject * semlock_iszero(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { #ifdef HAVE_BROKEN_SEM_GETVALUE if (sem_trywait(self->handle) < 0) { if (errno == EAGAIN) Py_RETURN_TRUE; return _PyMp_SetError(NULL, MP_STANDARD_ERROR); } else { if (sem_post(self->handle) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); Py_RETURN_FALSE; } #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return _PyMp_SetError(NULL, MP_STANDARD_ERROR); return PyBool_FromLong((long)sval == 0); #endif } static PyObject * semlock_afterfork(SemLockObject *self, PyObject *Py_UNUSED(ignored)) { self->count = 0; Py_RETURN_NONE; } /* * Semaphore methods */ static PyMethodDef semlock_methods[] = { {"acquire", (PyCFunction)(void(*)(void))semlock_acquire, METH_VARARGS | METH_KEYWORDS, "acquire the semaphore/lock"}, {"release", (PyCFunction)semlock_release, METH_NOARGS, "release the semaphore/lock"}, {"__enter__", (PyCFunction)(void(*)(void))semlock_acquire, METH_VARARGS | METH_KEYWORDS, "enter the semaphore/lock"}, {"__exit__", (PyCFunction)semlock_release, METH_VARARGS, "exit the semaphore/lock"}, {"_count", (PyCFunction)semlock_count, METH_NOARGS, "num of `acquire()`s minus num of `release()`s for this process"}, {"_is_mine", (PyCFunction)semlock_ismine, METH_NOARGS, "whether the lock is owned by this thread"}, {"_get_value", (PyCFunction)semlock_getvalue, METH_NOARGS, "get the value of the semaphore"}, {"_is_zero", (PyCFunction)semlock_iszero, METH_NOARGS, "returns whether semaphore has value zero"}, {"_rebuild", (PyCFunction)semlock_rebuild, METH_VARARGS | METH_CLASS, ""}, {"_after_fork", (PyCFunction)semlock_afterfork, METH_NOARGS, "rezero the net acquisition count after fork()"}, {NULL} }; /* * Member table */ static PyMemberDef semlock_members[] = { {"handle", T_SEM_HANDLE, offsetof(SemLockObject, handle), READONLY, ""}, {"kind", T_INT, offsetof(SemLockObject, kind), READONLY, ""}, {"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY, ""}, {"name", T_STRING, offsetof(SemLockObject, name), READONLY, ""}, {NULL} }; /* * Semaphore type */ PyTypeObject _PyMp_SemLockType = { PyVarObject_HEAD_INIT(NULL, 0) /* tp_name */ "_multiprocess.SemLock", /* tp_basicsize */ sizeof(SemLockObject), /* tp_itemsize */ 0, /* tp_dealloc */ (destructor)semlock_dealloc, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_doc */ "Semaphore/Mutex type", /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ semlock_methods, /* tp_members */ semlock_members, /* tp_getset */ 0, /* tp_base */ 0, 
/* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ semlock_new, }; /* * Function to unlink semaphore names */ PyObject * _PyMp_sem_unlink(PyObject *ignore, PyObject *args) { char *name; if (!PyArg_ParseTuple(args, "s", &name)) return NULL; if (SEM_UNLINK(name) < 0) { _PyMp_SetError(NULL, MP_STANDARD_ERROR); return NULL; } Py_RETURN_NONE; } uqfoundation-multiprocess-b3457a5/py3.9/README_MODS000066400000000000000000001044461455552142400217150ustar00rootroot00000000000000cp -rf py3.8/examples . cp -rf py3.8/doc . cp -f py3.8/index.html . cp -rf py3.8/_multiprocess _multiprocess cp -rf py3.8/multiprocess multiprocess cp -rf py3.8/Modules Modules cp Python-3.9.2/Lib/test/mp_*py multiprocess/tests cp Python-3.9.2/Lib/test/_test_multiprocessing.py multiprocess/tests/__init__.py # ---------------------------------------------------------------------- diff Python-3.8.1/Modules/_multiprocessing/multiprocessing.h Python-3.9.0a2/Modules/_multiprocessing/multiprocessing.h 30,37d29 < # define HANDLE int < # define SOCKET int < # define BOOL int < # define UINT32 uint32_t < # define INT32 int32_t < # define TRUE 1 < # define FALSE 0 < # define INVALID_HANDLE_VALUE (-1) 75,76d66 < # define F_DWORD "k" < # define T_DWORD T_ULONG # ---------------------------------------------------------------------- diff Python-3.8.1/Lib/multiprocessing/managers.py Python-3.9.0a2/Lib/multiprocessing/managers.py 251c251 < except KeyError as second_ke: --- > except KeyError: 299c299 < except Exception as e: --- > except Exception: 363c363 < def create(*args, **kwds): --- > def create(self, c, typeid, /, *args, **kwds): 367,392d366 < if len(args) >= 3: < self, c, typeid, *args = args < elif not args: < raise TypeError("descriptor 'create' of 'Server' object " < "needs an argument") < else: < if 'typeid' not in kwds: < raise TypeError('create expected at least 2 positional ' < 'arguments, got %d' % (len(args)-1)) < typeid = kwds.pop('typeid') < if len(args) >= 2: < self, c, *args = args < import warnings < warnings.warn("Passing 'typeid' as keyword argument is deprecated", < DeprecationWarning, stacklevel=2) < else: < if 'c' not in kwds: < raise TypeError('create expected at least 2 positional ' < 'arguments, got %d' % (len(args)-1)) < c = kwds.pop('c') < self, *args = args < import warnings < warnings.warn("Passing 'c' as keyword argument is deprecated", < DeprecationWarning, stacklevel=2) < args = tuple(args) < 424d397 < create.__text_signature__ = '($self, c, typeid, /, *args, **kwds)' 1296c1269 < def create(*args, **kwargs): --- > def create(self, c, typeid, /, *args, **kwargs): 1302,1311d1274 < if len(args) >= 3: < typeod = args[2] < elif 'typeid' in kwargs: < typeid = kwargs['typeid'] < elif not args: < raise TypeError("descriptor 'create' of 'SharedMemoryServer' " < "object needs an argument") < else: < raise TypeError('create expected at least 2 positional ' < 'arguments, got %d' % (len(args)-1)) 1314,1315c1277 < return Server.create(*args, **kwargs) < create.__text_signature__ = '($self, c, typeid, /, *args, **kwargs)' --- > return Server.create(self, c, typeid, *args, **kwargs) diff Python-3.8.1/Lib/multiprocessing/popen_fork.py Python-3.9.0a2/Lib/multiprocessing/popen_fork.py 28c28 < except OSError as e: --- > except OSError: diff Python-3.8.1/Lib/multiprocessing/util.py Python-3.9.0a2/Lib/multiprocessing/util.py 432c432 < False, False, None) --- > False, False, None, None, None, -1, None) # 
---------------------------------------------------------------------- diff Python-3.9.0a2/Modules/_multiprocessing/posixshmem.c Python-3.9.0b1/Modules/_multiprocessing/posixshmem.c 8d7 < #include "structmember.h" diff Python-3.9.0a2/Modules/_multiprocessing/semaphore.c Python-3.9.0b1/Modules/_multiprocessing/semaphore.c 271d270 < double timeout; 274,275d272 < struct timeval now; < long sec, nsec; 288,290c285,288 < if (timeout_obj != Py_None) { < timeout = PyFloat_AsDouble(timeout_obj); < if (PyErr_Occurred()) --- > int use_deadline = (timeout_obj != Py_None); > if (use_deadline) { > double timeout = PyFloat_AsDouble(timeout_obj); > if (PyErr_Occurred()) { 292c290,291 < if (timeout < 0.0) --- > } > if (timeout < 0.0) { 293a293 > } 294a295 > struct timeval now; 299,300c300,301 < sec = (long) timeout; < nsec = (long) (1e9 * (timeout - sec) + 0.5); --- > long sec = (long) timeout; > long nsec = (long) (1e9 * (timeout - sec) + 0.5); 318c319 < if (timeout_obj == Py_None) { --- > if (!use_deadline) { diff Python-3.9.0a2/Lib/multiprocessing/connection.py Python-3.9.0b1/Lib/multiprocessing/connection.py 75a76,80 > # Prefer abstract sockets if possible to avoid problems with the address > # size. When coding portable applications, some implementations have > # sun_path as short as 92 bytes in the sockaddr_un struct. > if util.abstract_sockets_supported: > return f"\0listener-{os.getpid()}-{next(_mmap_counter)}" 105c110 < elif type(address) is str: --- > elif type(address) is str or util.is_abstract_socket_namespace(address): 600c605,606 < if family == 'AF_UNIX': --- > if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): > # Linux abstract socket namespaces do not need to be explicitly unlinked diff Python-3.9.0a2/Lib/multiprocessing/forkserver.py Python-3.9.0b1/Lib/multiprocessing/forkserver.py 58c58,59 < os.unlink(self._forkserver_address) --- > if not util.is_abstract_socket_namespace(self._forkserver_address): > os.unlink(self._forkserver_address) 138c139,140 < os.chmod(address, 0o600) --- > if not util.is_abstract_socket_namespace(address): > os.chmod(address, 0o600) 238,245c240,241 < if os.WIFSIGNALED(sts): < returncode = -os.WTERMSIG(sts) < else: < if not os.WIFEXITED(sts): < raise AssertionError( < "Child {0:n} status is {1:n}".format( < pid,sts)) < returncode = os.WEXITSTATUS(sts) --- > returncode = os.waitstatus_to_exitcode(sts) > diff Python-3.9.0a2/Lib/multiprocessing/managers.py Python-3.9.0b1/Lib/multiprocessing/managers.py 23a24 > import types 62c63 < Type to uniquely indentify a shared object --- > Type to uniquely identify a shared object 797c798 < Try to call a method of the referrent and return a copy of the result --- > Try to call a method of the referent and return a copy of the result 1131a1133,1134 > __class_getitem__ = classmethod(types.GenericAlias) > 1264a1268,1271 > address = self.address > # The address of Linux abstract namespaces can be bytes > if isinstance(address, bytes): > address = os.fsdecode(address) 1266c1273 < _SharedMemoryTracker(f"shmm_{self.address}_{getpid()}") --- > _SharedMemoryTracker(f"shm_{address}_{getpid()}") diff Python-3.9.0a2/Lib/multiprocessing/pool.py Python-3.9.0b1/Lib/multiprocessing/pool.py 22a23 > import types 24d24 < from queue import Empty 654,655d653 < self._worker_handler._state = TERMINATE < self._change_notifier.put(None) 684a683,685 > # Notify that the worker_handler state has been changed so the > # _handle_workers loop can be unblocked (and exited) in order to > # send the finalization sentinel all the 
workers. 685a687,688 > change_notifier.put(None) > 779a783,784 > __class_getitem__ = classmethod(types.GenericAlias) > diff Python-3.9.0a2/Lib/multiprocessing/popen_fork.py Python-3.9.0b1/Lib/multiprocessing/popen_fork.py 33,37c33 < if os.WIFSIGNALED(sts): < self.returncode = -os.WTERMSIG(sts) < else: < assert os.WIFEXITED(sts), "Status is {:n}".format(sts) < self.returncode = os.WEXITSTATUS(sts) --- > self.returncode = os.waitstatus_to_exitcode(sts) diff Python-3.9.0a2/Lib/multiprocessing/process.py Python-3.9.0b1/Lib/multiprocessing/process.py 320,323c320,323 < if not e.args: < exitcode = 1 < elif isinstance(e.args[0], int): < exitcode = e.args[0] --- > if e.code is None: > exitcode = 0 > elif isinstance(e.code, int): > exitcode = e.code 325c325 < sys.stderr.write(str(e.args[0]) + '\n') --- > sys.stderr.write(str(e.code) + '\n') diff Python-3.9.0a2/Lib/multiprocessing/queues.py Python-3.9.0b1/Lib/multiprocessing/queues.py 16a17 > import types 51,52c52 < < self._after_fork() --- > self._reset() 65c65 < self._after_fork() --- > self._reset() 69c69,75 < self._notempty = threading.Condition(threading.Lock()) --- > self._reset(after_fork=True) > > def _reset(self, after_fork=False): > if after_fork: > self._notempty._at_fork_reinit() > else: > self._notempty = threading.Condition(threading.Lock()) 342a349,352 > def close(self): > self._reader.close() > self._writer.close() > 368a379,380 > > __class_getitem__ = classmethod(types.GenericAlias) diff Python-3.9.0a2/Lib/multiprocessing/resource_sharer.py Python-3.9.0b1/Lib/multiprocessing/resource_sharer.py 66d65 < self._old_locks = [] 116,119c115 < # If self._lock was locked at the time of the fork, it may be broken < # -- see issue 6721. Replace it without letting it be gc'ed. < self._old_locks.append(self._lock) < self._lock = threading.Lock() --- > self._lock._at_fork_reinit() diff Python-3.9.0a2/Lib/multiprocessing/shared_memory.py Python-3.9.0b1/Lib/multiprocessing/shared_memory.py 16a17 > import types 253a255,263 > # The shared memory area is organized as follows: > # - 8 bytes: number of items (N) as a 64-bit integer > # - (N + 1) * 8 bytes: offsets of each element from the start of the > # data area > # - K bytes: the data area storing item values (with encoding and size > # depending on their respective types) > # - N * 8 bytes: `struct` format string for each element > # - N bytes: index into _back_transforms_mapping for each element > # (for reconstructing the corresponding Python value) 285c295,296 < if sequence is not None: --- > if name is None or sequence is not None: > sequence = sequence or () 296,299c307,314 < self._allocated_bytes = tuple( < self._alignment if fmt[-1] != "s" else int(fmt[:-1]) < for fmt in _formats < ) --- > offset = 0 > # The offsets of each list element into the shared memory's > # data area (0 meaning the start of the data area, not the start > # of the shared memory area). > self._allocated_offsets = [0] > for fmt in _formats: > offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) > self._allocated_offsets.append(offset) 309a325 > self.shm = SharedMemory(name, create=True, size=requested_size) 311,313d326 < requested_size = 8 # Some platforms require > 0. 
< < if name is not None and sequence is None: 315,316d327 < else: < self.shm = SharedMemory(name, create=True, size=requested_size) 325c336 < *(self._allocated_bytes) --- > *(self._allocated_offsets) 348,351c359,364 < self._allocated_bytes = struct.unpack_from( < self._format_size_metainfo, < self.shm.buf, < 1 * 8 --- > self._allocated_offsets = list( > struct.unpack_from( > self._format_size_metainfo, > self.shm.buf, > 1 * 8 > ) 373d385 < position = position if position >= 0 else position + self._list_len 390d401 < position = position if position >= 0 else position + self._list_len 409a421 > position = position if position >= 0 else position + self._list_len 411,412c423 < offset = self._offset_data_start \ < + sum(self._allocated_bytes[:position]) --- > offset = self._offset_data_start + self._allocated_offsets[position] 426a438 > position = position if position >= 0 else position + self._list_len 428,429c440,441 < offset = self._offset_data_start \ < + sum(self._allocated_bytes[:position]) --- > item_offset = self._allocated_offsets[position] > offset = self._offset_data_start + item_offset 435a448 > encoded_value = value 437,438c450,455 < if len(value) > self._allocated_bytes[position]: < raise ValueError("exceeds available storage for existing str") --- > allocated_length = self._allocated_offsets[position + 1] - item_offset > > encoded_value = (value.encode(_encoding) > if isinstance(value, str) else value) > if len(encoded_value) > allocated_length: > raise ValueError("bytes/str item exceeds available storage") 443c460 < self._allocated_bytes[position], --- > allocated_length, 451,452c468 < value = value.encode(_encoding) if isinstance(value, str) else value < struct.pack_into(new_format, self.shm.buf, offset, value) --- > struct.pack_into(new_format, self.shm.buf, offset, encoded_value) 465c481 < "The struct packing format used by all currently stored values." --- > "The struct packing format used by all currently stored items." 472,473c488,489 < "The struct packing format used for metainfo on storage sizes." < return f"{self._list_len}q" --- > "The struct packing format used for the items' storage offsets." > return "q" * (self._list_len + 1) 477c493 < "The struct packing format used for the values' packing formats." --- > "The struct packing format used for the items' packing formats." 482c498 < "The struct packing format used for the values' back transforms." --- > "The struct packing format used for the items' back transforms." 
487c503,505 < return (self._list_len + 1) * 8 # 8 bytes per "q" --- > # - 8 bytes for the list length > # - (N + 1) * 8 bytes for the element offsets > return (self._list_len + 2) * 8 491c509 < return self._offset_data_start + sum(self._allocated_bytes) --- > return self._offset_data_start + self._allocated_offsets[-1] 510a529,530 > > __class_getitem__ = classmethod(types.GenericAlias) diff Python-3.9.0a2/Lib/multiprocessing/spawn.py Python-3.9.0b1/Lib/multiprocessing/spawn.py 39c39 < _python_exe = sys._base_executable --- > _python_exe = sys.executable diff Python-3.9.0a2/Lib/multiprocessing/util.py Python-3.9.0b1/Lib/multiprocessing/util.py 104a105,127 > > # Abstract socket support > > def _platform_supports_abstract_sockets(): > if sys.platform == "linux": > return True > if hasattr(sys, 'getandroidapilevel'): > return True > return False > > > def is_abstract_socket_namespace(address): > if not address: > return False > if isinstance(address, bytes): > return address[0] == 0 > elif isinstance(address, str): > return address[0] == "\0" > raise TypeError('address type of {address!r} unrecognized') > > > abstract_sockets_supported = _platform_supports_abstract_sockets() > 347,350d369 < self._reset() < register_after_fork(self, ForkAwareThreadLock._reset) < < def _reset(self): 353a373,376 > register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) > > def _at_fork_reinit(self): > self._lock._at_fork_reinit() # ---------------------------------------------------------------------- diff Python-3.9.0b1/Lib/multiprocessing/context.py Python-3.9.0/Lib/multiprocessing/context.py 259a260 > methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] 261,263c262,264 < return ['fork', 'spawn', 'forkserver'] < else: < return ['fork', 'spawn'] --- > methods.append('forkserver') > return methods > Common subdirectories: Python-3.9.0b1/Lib/multiprocessing/dummy and Python-3.9.0/Lib/multiprocessing/dummy diff Python-3.9.0b1/Lib/multiprocessing/shared_memory.py Python-3.9.0/Lib/multiprocessing/shared_memory.py 78a79,80 > if size == 0: > raise ValueError("'size' must be a positive number different from zero") diff Python-3.9.0b1/Lib/multiprocessing/synchronize.py Python-3.9.0/Lib/multiprocessing/synchronize.py 273c273 < False), ('notify: Should not have been able to acquire' --- > False), ('notify: Should not have been able to acquire ' # ---------------------------------------------------------------------- diff Python-3.9.2/Lib/test/_test_multiprocessing.py multiprocess/tests/__init__.py 23c23 < import pickle --- > import pickle #XXX: use dill? 
36c36 < support.skip_if_broken_multiprocessing_synchronize() --- > test.support.import_module('multiprocess.synchronize') 39,44c39,45 < import multiprocessing.connection < import multiprocessing.dummy < import multiprocessing.heap < import multiprocessing.managers < import multiprocessing.pool < import multiprocessing.queues --- > import multiprocess as multiprocessing > import multiprocess.connection > import multiprocess.dummy > import multiprocess.heap > import multiprocess.managers > import multiprocess.pool > import multiprocess.queues 46c47 < from multiprocessing import util --- > from multiprocess import util 49c50 < from multiprocessing import reduction --- > from multiprocess import reduction 55c56 < from multiprocessing.sharedctypes import Value, copy --- > from multiprocess.sharedctypes import Value, copy 61c62 < from multiprocessing import shared_memory --- > from multiprocess import shared_memory 89c90 < from multiprocessing import resource_tracker --- > from multiprocess import resource_tracker 117c118 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 130c131 < PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver'] --- > PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] 169c170 < t = time.monotonic() --- > t = getattr(time,'monotonic',time.time)() 173c174 < self.elapsed = time.monotonic() - t --- > self.elapsed = getattr(time,'monotonic',time.time)() - t 285c286 < from multiprocessing.process import parent_process --- > from multiprocess.process import parent_process 288c289 < def test_parent_process(self): --- > def _test_parent_process(self): 321c322 < from multiprocessing.process import parent_process --- > from multiprocess.process import parent_process 488a490 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 740c742 < from multiprocessing.forkserver import _forkserver --- > from multiprocess.forkserver import _forkserver 829c831 < self.assertIn("test_multiprocessing.py", err) --- > self.assertIn("__init__.py", err) 1119c1121 < import multiprocessing --- > import multiprocess as multiprocessing 1137c1139 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 1139c1141 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 1541c1543 < dt = time.monotonic() --- > dt = getattr(time,'monotonic',time.time)() 1543c1545 < dt = time.monotonic() - dt --- > dt = getattr(time,'monotonic',time.time)() - dt 2012c2014 < self.skipTest("requires multiprocessing.sharedctypes") --- > self.skipTest("requires multiprocess.sharedctypes") 2578a2581 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 2620a2624 > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 2634c2638 < t_start = time.monotonic() --- > t_start = getattr(time,'monotonic',time.time)() 2646c2650 < self.assertGreater(time.monotonic() - t_start, 0.9) --- > self.assertGreater(getattr(time,'monotonic',time.time)() - t_start, 0.9) 2718,2719c2722,2723 < def test_unpickleable_result(self): < from multiprocessing.pool import MaybeEncodingError --- > def _test_unpickleable_result(self): > from multiprocess.pool import MaybeEncodingError 2807c2811 < from multiprocessing.managers import BaseManager, BaseProxy, RemoteError --- > from multiprocess.managers import BaseManager, BaseProxy, RemoteError 3450c3454 < from multiprocessing import resource_sharer --- > from multiprocess import resource_sharer 3695c3699 < self.skipTest("requires multiprocessing.sharedctypes") --- > 
self.skipTest("requires multiprocess.sharedctypes") 3745c3749 < @unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory") --- > @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") 4123c4127 < deadline = time.monotonic() + support.LONG_TIMEOUT --- > deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT 4125c4129 < while time.monotonic() < deadline: --- > while getattr(time,'monotonic',time.time)() < deadline: 4277,4279c4281,4283 < modules = ['multiprocessing.' + m for m in modules] < modules.remove('multiprocessing.__init__') < modules.append('multiprocessing') --- > modules = ['multiprocess.' + m for m in modules] > modules.remove('multiprocess.__init__') > modules.append('multiprocess') 4285,4287c4289,4291 < modules.remove('multiprocessing.popen_fork') < modules.remove('multiprocessing.popen_forkserver') < modules.remove('multiprocessing.popen_spawn_posix') --- > modules.remove('multiprocess.popen_fork') > modules.remove('multiprocess.popen_forkserver') > modules.remove('multiprocess.popen_spawn_posix') 4289c4293 < modules.remove('multiprocessing.popen_spawn_win32') --- > modules.remove('multiprocess.popen_spawn_win32') 4291c4295 < modules.remove('multiprocessing.popen_forkserver') --- > modules.remove('multiprocess.popen_forkserver') 4295c4299 < modules.remove('multiprocessing.sharedctypes') --- > modules.remove('multiprocess.sharedctypes') 4577c4581 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4617c4621 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4658c4662 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4663c4667 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4665c4669 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4673c4677 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4675c4679 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4686c4690 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4699c4703 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4701c4705 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4709c4713 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4711c4715 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4718c4722 < start = time.monotonic() --- > start = getattr(time,'monotonic',time.time)() 4720c4724 < delta = time.monotonic() - start --- > delta = getattr(time,'monotonic',time.time)() - start 4729c4733 < from multiprocessing.connection import wait --- > from multiprocess.connection import wait 4731c4735 < t = time.monotonic() --- > t = getattr(time,'monotonic',time.time)() 4733c4737 < t = time.monotonic() - t --- > t = getattr(time,'monotonic',time.time)() - t 4777c4781 < def test_flags(self): --- > def _test_flags(self): 4780c4784 < prog = ('from test._test_multiprocessing import TestFlags; ' + --- > prog = ('from multiprocess.tests import TestFlags; ' + 5082c5086 < def test_resource_tracker(self): --- > def _test_resource_tracker(self): 5088,5090c5092,5094 < import multiprocessing as mp < from multiprocessing import resource_tracker < from multiprocessing.shared_memory import SharedMemory --- > import multiprocess as mp > from 
multiprocess import resource_tracker > from multiprocess.shared_memory import SharedMemory 5134,5135c5138,5139 < deadline = time.monotonic() + support.LONG_TIMEOUT < while time.monotonic() < deadline: --- > deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT > while getattr(time,'monotonic',time.time)() < deadline: 5159c5163 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5207c5211 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5216c5220 < from multiprocessing.resource_tracker import _resource_tracker --- > from multiprocess.resource_tracker import _resource_tracker 5375c5379 < start_time = time.monotonic() --- > start_time = getattr(time,'monotonic',time.time)() 5380c5384 < dt = time.monotonic() - start_time --- > dt = getattr(time,'monotonic',time.time)() - start_time 5383c5387 < support.print_warning(f"multiprocessing.Manager still has " --- > support.print_warning(f"multiprocess.Manager still has " 5651c5655 < start_time = time.monotonic() --- > start_time = getattr(time,'monotonic',time.time)() 5656c5660 < dt = time.monotonic() - start_time --- > dt = getattr(time,'monotonic',time.time)() - start_time 5659c5663 < support.print_warning(f"multiprocessing.Manager still has " --- > support.print_warning(f"multiprocess.Manager still has " # ---------------------------------------------------------------------- diff Python-3.9.4/Lib/test/_test_multiprocessing.py Python-3.9.7/Lib/test/_test_multiprocessing.py 609a610 > gc.collect() # For PyPy or other GCs. 2284a2286,2295 > def test_nested_queue(self): > a = self.list() # Test queue inside list > a.append(self.Queue()) > a[0].put(123) > self.assertEqual(a[0].get(), 123) > b = self.dict() # Test queue inside dict > b[0] = self.Queue() > b[0].put(456) > self.assertEqual(b[0].get(), 456) > 2655a2667 > gc.collect() # For PyPy or other GCs. 4157a4170 > gc.collect() # For PyPy or other GCs. 4168a4182 > gc.collect() # For PyPy or other GCs. 4174a4189 > gc.collect() # For PyPy or other GCs. # ---------------------------------------------------------------------- diff Python-3.9.4/Lib/multiprocessing/managers.py Python-3.9.7/Lib/multiprocessing/managers.py 11,12c11 < __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token', < 'SharedMemoryManager' ] --- > __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] 38d36 < HAS_SHMEM = True 40a39,41 > else: > HAS_SHMEM = True > __all__.append('SharedMemoryManager') 962c963 < exposed=None, incref=True): --- > exposed=None, incref=True, manager_owned=False): 982c983 < incref=incref) --- > incref=incref, manager_owned=manager_owned) # ---------------------------------------------------------------------- dude@borel>$ diff Python-3.9.7/Lib/test/_test_multiprocessing.py Python-3.9.9/Lib/test/_test_multiprocessing.py 3771a3772,3778 > def _new_shm_name(self, prefix): > # Add a PID to the name of a POSIX shared memory object to allow > # running multiprocessing tests (test_multiprocessing_fork, > # test_multiprocessing_spawn, etc) in parallel. 
> return prefix + str(os.getpid()) > > @unittest.skipIf(sys.platform == "win32", "test is broken on Windows") > @unittest.skipIf(True, "fails with is_dill(obj, child=True)") 3773c3780,3781 < sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512) --- > name_tsmb = self._new_shm_name('test01_tsmb') > sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) 3777c3785 < self.assertEqual(sms.name, 'test01_tsmb') --- > self.assertEqual(sms.name, name_tsmb) 3786c3794 < also_sms = shared_memory.SharedMemory('test01_tsmb') --- > also_sms = shared_memory.SharedMemory(name_tsmb) 3791c3799 < same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size) --- > same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) 3809c3817 < names = ['test01_fn', 'test02_fn'] --- > names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] 3830a3839,3844 > name_dblunlink = self._new_shm_name('test01_dblunlink') > sms_uno = shared_memory.SharedMemory( > name_dblunlink, > create=True, > size=5000 > ) 3832,3837d3845 < sms_uno = shared_memory.SharedMemory( < 'test01_dblunlink', < create=True, < size=5000 < ) < 3841c3849 < sms_duo = shared_memory.SharedMemory('test01_dblunlink') --- > sms_duo = shared_memory.SharedMemory(name_dblunlink) 3853c3861 < 'test01_tsmb', --- > name_tsmb, 3867c3875 < ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb') --- > ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) 4055c4063,4064 < sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate') --- > name_duplicate = self._new_shm_name('test03_duplicate') > sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) 4058c4067 < self.assertEqual('test03_duplicate', sl_copy.shm.name) --- > self.assertEqual(name_duplicate, sl_copy.shm.name) 4148a4158,4164 > # Without this line it was raising warnings like: > # UserWarning: resource_tracker: > # There appear to be 1 leaked shared_memory > # objects to clean up at shutdown > # See: https://bugs.python.org/issue45209 > resource_tracker.unregister(f"/{name}", "shared_memory") > # ---------------------------------------------------------------------- $ diff Python-3.9.10/Lib/test/_test_multiprocessing.py Python-3.9.12/Lib/test/_test_multiprocessing.py 71a72,77 > if support.check_sanitizer(address=True): > # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to > # work around a libasan race condition: dead lock in pthread_create(). 
> raise unittest.SkipTest("libasan has a pthread_create() dead lock") > > # ---------------------------------------------------------------------- diff Python-3.9.12/Lib/multiprocessing/managers.py Python-3.9.13/Lib/multiprocessing/managers.py 672c672 < process.join(timeout=0.1) --- > process.join(timeout=1.0) diff Python-3.9.12/Lib/multiprocessing/queues.py Python-3.9.13/Lib/multiprocessing/queues.py 142,148c142,145 < try: < self._reader.close() < finally: < close = self._close < if close: < self._close = None < close() --- > close = self._close > if close: > self._close = None > close() 172,173c169,171 < self._wlock, self._writer.close, self._ignore_epipe, < self._on_queue_feeder_error, self._sem), --- > self._wlock, self._reader.close, self._writer.close, > self._ignore_epipe, self._on_queue_feeder_error, > self._sem), 214,215c212,213 < def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe, < onerror, queue_sem): --- > def _feed(buffer, notempty, send_bytes, writelock, reader_close, > writer_close, ignore_epipe, onerror, queue_sem): 241c239,240 < close() --- > reader_close() > writer_close() diff Python-3.9.12/Lib/multiprocessing/util.py Python-3.9.13/Lib/multiprocessing/util.py 123c123 < raise TypeError('address type of {address!r} unrecognized') --- > raise TypeError(f'address type of {address!r} unrecognized') # ---------------------------------------------------------------------- diff Python-3.9.13/Lib/multiprocessing/connection.py Python-3.9.16/Lib/multiprocessing/connection.py 76,80d75 < # Prefer abstract sockets if possible to avoid problems with the address < # size. When coding portable applications, some implementations have < # sun_path as short as 92 bytes in the sockaddr_un struct. < if util.abstract_sockets_supported: < return f"\0listener-{os.getpid()}-{next(_mmap_counter)}" uqfoundation-multiprocess-b3457a5/py3.9/_multiprocess/000077500000000000000000000000001455552142400230725ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/_multiprocess/__init__.py000066400000000000000000000005011455552142400251770ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE from _multiprocessing import * uqfoundation-multiprocess-b3457a5/py3.9/doc/000077500000000000000000000000001455552142400207475ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/doc/CHANGES.html000066400000000000000000001133431455552142400227120ustar00rootroot00000000000000 Changelog for processing

Changelog for processing

Changes in 0.52

  • On versions 0.50 and 0.51 Mac OSX Lock.release() would fail with OSError(errno.ENOSYS, "[Errno 78] Function not implemented"). This appears to be because on Mac OSX sem_getvalue() has not been implemented.

    Now sem_getvalue() is no longer needed. Unfortunately, however, on Mac OSX BoundedSemaphore() will not raise ValueError if it exceeds its initial value.

  • Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs.

  • The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias.

Changes in 0.51

  • In 0.50 processing.Value() and processing.sharedctypes.Value() were related but had different signatures, which was rather confusing.

    Now processing.sharedctypes.Value() has been renamed processing.sharedctypes.RawValue() and processing.sharedctypes.Value() is the same as processing.Value().

  • In version 0.50 sendfd() and recvfd() apparently did not work on 64bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40.

    However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of sendfd() and recvfd() by setting macros['HAVE_FD_TRANSFER'] = 0 in setup.py.

  • Fixed an authentication error when using a "remote" manager created using BaseManager.from_address().

  • Fixed a couple of bugs which only affected Python 2.4.

Changes in 0.50

  • ctypes is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately.

  • LocalManager() has been removed.

  • Added processing.Value() and processing.Array() which are similar to LocalManager.SharedValue() and LocalManager.SharedArray().

  • In the sharedctypes module new_value() and new_array() have been renamed Value() and Array().

  • Process.stop(), Process.getStoppable() and Process.setStoppable() have been removed. Use Process.terminate() instead.

  • processing.Lock now matches threading.Lock behaviour more closely: a thread can now release a lock it does not own, and when a thread tries to acquire a lock it already owns a deadlock now results instead of an exception.

  • On Windows when the main thread is blocking on a method of Lock, RLock, Semaphore, BoundedSemaphore, Condition it will no longer ignore Ctrl-C. (The same was already true on Unix.)

    This differs from the behaviour of the equivalent objects in threading which will completely ignore Ctrl-C.

  • The test sub-package has been replaced by lots of unit tests in a tests sub-package. Some of the old test files have been moved over to a new examples sub-package.

  • On Windows it is now possible for a non-console python program (i.e. one using pythonw.exe instead of python.exe) to use processing.

    Previously an exception was raised when subprocess.py tried to duplicate stdin, stdout, stderr.

  • Proxy objects should now be thread safe -- they now use thread local storage.

  • Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise RuntimeError with a message saying that the object should only be shared between processes using inheritance.

    Previously, this worked unreliably on Windows but would fail with an unexplained AssertionError on Unix.

  • The names of some of the macros used for compiling the extension have changed. See INSTALL.txt and setup.py.

  • A few changes which (hopefully) make compilation possible on Solaris.

  • Lots of refactoring of the code.

  • Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux).

Changes in 0.40

  • Removed SimpleQueue and PosixQueue types. Just use Queue instead.

  • Previously if you forgot to use the

    if __name__ == '__main__':
        freezeSupport()
        ...
    

    idiom on Windows then processes could be created recursively bringing the computer to its knees. Now RuntimeError will be raised instead.

  • Some refactoring of the code.

  • A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed.

Changes in 0.39

  • One can now create one-way pipes by doing reader, writer = Pipe(duplex=False).

  • Rewrote code for managing shared memory maps.

  • Added a sharedctypes module for creating ctypes objects allocated from shared memory. On Python 2.4 this requires the installation of ctypes.

    ctypes objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a LocalManager.

  • Rearranged documentation.

  • Previously the C extension caused a segfault on 64 bit machines with Python 2.5 because it used int instead of Py_ssize_t in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report.

  • A fix for Pool.terminate().

  • A fix for cleanup behaviour of Queue.

Changes in 0.38

  • Have revamped the queue types. Now the queue types are Queue, SimpleQueue and (on systems which support it) PosixQueue.

    Now Queue should behave just like Python's normal Queue.Queue class except that qsize(), task_done() and join() are not implemented. In particular, if no maximum size was specified when the queue was created then put() will always succeed without blocking.

    A SimpleQueue instance is really just a pipe protected by a couple of locks. It has get(), put() and empty() methods but does not support timeouts or non-blocking operation.

    BufferedPipeQueue() and PipeQueue() remain as deprecated aliases of Queue() but BufferedPosixQueue() has been removed. (Not sure if we really need to keep PosixQueue()...)

  • Previously the Pool.shutdown() method was a little dodgy -- it could block indefinitely if map() or imap*() were used and did not try to terminate workers while they were doing a task.

    Now there are three new methods close(), terminate() and join() -- shutdown() is retained as a deprecated alias of terminate(). Thanks to Gerald John M. Manipon for feature request/suggested patch to shutdown().

  • Pool.imap() and Pool.imap_unordered() have gained a chunksize argument which allows the iterable to be submitted to the pool in chunks. Choosing chunksize appropriately makes Pool.imap() almost as fast as Pool.map() even for long iterables and cheap functions.

  • Previously on Windows, when the cleanup code for a LocalManager attempted to unlink the file which backs the shared memory map, an exception was raised if a child process still existed with an open handle for that mmap. This is likely to happen if a daemon process inherits a LocalManager instance.

    Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin.

  • types.MethodType is registered with copy_reg so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.)

    This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object.

  • On Windows reduction.fromfd() now returns true instances of _socket.socket, so there is no more need for the _processing.falsesocket type.

Changes in 0.37

  • Updated metadata and documentation because the project is now hosted at developer.berlios.de/projects/pyprocessing.
  • The Pool.join() method has been removed. Pool.shutdown() will now join the worker processes automatically.
  • A pool object no longer participates in a reference cycle so Pool.shutdown() should get called as soon as its reference count falls to zero.
  • On Windows if enableLogging() was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, specifying a handler type in enableLogging() will now cause any previous handlers used by the logger to be discarded.

Changes in 0.36

  • In recent versions on Unix the finalizers in a manager process were never given a chance to run before os._exit() was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.

  • The shutting down of managers has been cleaned up.

  • In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed.

  • In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed.

  • Previously the arguments to processing.Semaphore() and processing.BoundedSemaphore() did not have any defaults. The defaults should be 1 to match threading. Fixed.

  • It should now be possible for a Windows Service created by using pywin32 to spawn processes using the processing package.

    Note that pywin32 apparently has a bug meaning that Py_Finalize() is never called when the service exits so functions registered with atexit never get a chance to run. Therefore it is advisable to explicitly call sys.exitfunc() or atexit._run_exitfuncs() at the end of ServiceFramework.DoSvcRun(). Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report.

  • Added getLogger() and enableLogging() to support logging.

Changes in 0.35

  • By default processes are no longer stoppable using the stop() method: one must call setStoppable(True) before start() in order to use the stop() method. (Note that terminate() will work regardless of whether the process is marked as being "stoppable".)

    The reason for this is that on Windows getting stop() to work involves starting a new console for the child process and installing a signal handler for the SIGBREAK signal. This unfortunately means that Ctrl-Break cannot be used to kill all processes of the program.

  • Added setStoppable() and getStoppable() methods -- see above.

  • Added BufferedQueue/BufferedPipeQueue/BufferedPosixQueue. Putting an object on a buffered queue will always succeed without blocking (just like with Queue.Queue if no maximum size is specified). This makes them potentially safer than the normal queue types provided by processing which have finite capacity and may cause deadlocks if they fill.

    test/test_worker.py has been updated to use BufferedQueue for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock.

  • Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of sem_timedwait(). This means that Condition.wait() and Queue.get() should now work with timeouts on Mac OS X.

  • Added a callback argument to Pool.apply_async().

  • Added test/test_httpserverpool.py which runs a pool of http servers which share a single listening socket.

  • Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket.

  • Fixed bug in the iterator returned by Pool.imap().

  • Fixed bug in Condition.__repr__().

  • Fixed a handle/file descriptor leak when sockets or connections are unpickled.

Changes in 0.34

  • Although with version 0.33 the C extension would compile on Mac OSX, trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the ImportError exception was silently swallowed.

    This is now fixed by using the NO_SEM_TIMED macro. Unfortunately this means that some methods like Condition.wait() and Queue.get() will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing.

  • Added a terminate() method to process objects which is more forceful than stop().

  • Fixed bug in the cleanup function registered with atexit which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. Thanks to Dominique Wahli for report and testing.

  • Added test/test_workers.py which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another.

  • Added processing.Pool() which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the map() builtin. This is still experimental and undocumented --- see test/test_pool.py for example usage.

Changes in 0.33

  • Added a recvbytes_into() method for receiving byte data into objects with the writable buffer interface. Also renamed the _recv_string() and _send_string() methods of connection objects to recvbytes() and sendbytes().

  • Some optimizations for the transferring of large blocks of data using connection objects.

  • On Unix os.sysconf() is now used by default to determine whether to compile in support for posix semaphores or posix message queues.

    By using the NO_SEM_TIMED and NO_MQ_TIMED macros (see INSTALL.txt) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions sem_timedwait() or mq_timedreceive() and mq_timesend().

  • gettimeofday() is now used instead of clock_gettime() making compilation of the C extension (hopefully) possible on Mac OSX. No modification of setup.py should be necessary. Thanks to Michele Bertoldi for report and proposed patch.

  • cpuCount() function added which returns the number of CPUs in the system.

  • Bugfixes to PosixQueue class.

Changes in 0.32

  • Refactored and simplified _nonforking module -- info about sys.modules of parent process is no longer passed on to child process. Also pkgutil is no longer used.
  • Allocated space from an mmap used by LocalManager will now be recycled.
  • Better tests for LocalManager.
  • Fixed bug in managers.py concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report.
  • Added a freezeSupport() function. Calling this at the appropriate point in the main module is necessary when freezing a multiprocess program to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

Changes in 0.31

  • Fixed one line bug in localmanager.py which caused shared memory maps not to be resized properly.
  • Added tests for shared values/structs/arrays to test/test_processing.

Changes in 0.30

  • Process objects now support the complete API of thread objects.

    In particular isAlive(), isDaemon(), setDaemon() have been added and join() now supports the timeout parameter.

    There are also new methods stop(), getPid() and getExitCode().

  • Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores.

  • Added support for sharing simple objects between processes by using a shared memory map and the struct or array modules.

  • An activeChildren() function has been added to processing which returns a list of the child processes which are still alive.

  • A Pipe() function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent.

  • socket objects etc are now picklable and can be transferred between processes. (Requires compilation of the _processing extension.)

  • Subclasses of managers.BaseManager no longer automatically spawn a child process when an instance is created: the start() method must be called explicitly.

  • On Windows child processes are now spawned using subprocess.

  • On Windows the Python 2.5 version of pkgutil is now used for loading modules by the _nonforking module. On Python 2.4 this version of pkgutil (which uses the standard Python licence) is included in processing.compat.

  • The arguments to the functions in processing.connection have changed slightly.

  • Connection objects now have a poll() method which tests whether there is any data available for reading.

  • The test/py2exedemo folder shows how to get py2exe to create a Windows executable from a program using the processing package.

  • More tests.

  • Bugfixes.

  • Rearrangement of various stuff.

Changes in 0.21

  • By default a proxy is now only able to access those methods of its referent which have been explicitly exposed.
  • The connection sub-package now supports digest authentication.
  • Process objects are now given randomly generated 'inheritable' authentication keys.
  • A manager process will now only accept connections from processes using the same authentication key.
  • Previously get_module() from _nonforking.py was seriously messed up (though it generally worked). It is a lot saner now.
  • Python 2.4 or higher is now required.

Changes in 0.20

  • The doc folder contains HTML documentation.
  • test is now a subpackage. Running processing.test.main() will run test scripts using both processes and threads.
  • nonforking.py has been renamed _nonforking.py. manager.py has been renamed managers.py. connection.py has become a sub-package connection
  • Listener and Client have been removed from processing, but still exist in processing.connection.
  • The package is now probably compatible with versions of Python earlier than 2.4.
  • set is no longer a type supported by the default manager type.
  • Many more changes.

Changes in 0.12

  • Fixed bug where the arguments to processing.Manager() were passed on to processing.manager.DefaultManager() in the wrong order.
  • processing.dummy is now a subpackage of processing instead of a module.
  • Rearranged package so that the test folder, README.txt and CHANGES.txt are copied when the package is installed.

Changes in 0.11

  • Fixed bug on windows when the full path of nonforking.py contains a space.
  • On unix there is no longer a need to make the arguments to the constructor of Process be picklable or for an instance of a subclass of Process to be picklable when you call the start method.
  • On unix proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to Process. (This will never be possible on windows.)
uqfoundation-multiprocess-b3457a5/py3.9/doc/COPYING.html000066400000000000000000000040211455552142400227420ustar00rootroot00000000000000

Copyright (c) 2006-2008, R Oudkerk

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

uqfoundation-multiprocess-b3457a5/py3.9/doc/INSTALL.html000066400000000000000000000063531455552142400227520ustar00rootroot00000000000000 Installation of processing

Installation of processing

Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the ctypes package (which comes automatically with Python 2.5).

Windows binary builds for Python 2.4 and Python 2.5 are available at

http://pyprocessing.berlios.de

or

http://pypi.python.org/pypi/processing

Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:

python setup.py install

It should not be necessary to do any editing of setup.py if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the macros dictionary or libraries list. The section to modify reads

else:
    macros = dict(
        HAVE_SEM_OPEN=1,
        HAVE_SEM_TIMEDWAIT=1,
        HAVE_FD_TRANSFER=1
        )
    libraries = ['rt']

More details can be found in the comments in setup.py.

Note that if you use HAVE_SEM_OPEN=0 then support for posix semaphores will not be compiled in, and then many of the functions in the processing namespace, such as Lock() and Queue(), will not be available. However, one can still create a manager using manager = processing.Manager() and then do lock = manager.Lock() etc.
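
For example, a minimal sketch of that manager-based fallback (assuming processing was compiled with HAVE_SEM_OPEN=0):

from processing import Manager

manager = Manager()          # starts a separate manager process
lock = manager.Lock()        # stands in for processing.Lock()
queue = manager.Queue()      # stands in for processing.Queue()

lock.acquire()
queue.put('hello')
lock.release()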

Running tests

To run the test scripts using Python 2.5 do

python -m processing.tests

and on Python 2.4 do

python -c "from processing.tests import main; main()"

This will run a number of test scripts using both processes and threads.

uqfoundation-multiprocess-b3457a5/py3.9/doc/THANKS.html000066400000000000000000000017751455552142400226370ustar00rootroot00000000000000 Thanks

Thanks

Thanks to everyone who has offered bug reports, patches, suggestions:

Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.

Sorry if I have forgotten anyone.

uqfoundation-multiprocess-b3457a5/py3.9/doc/__init__.py000066400000000000000000000004001455552142400230520ustar00rootroot00000000000000import os
import webbrowser

def main():
    '''
    Show html documentation using webbrowser
    '''
    index_html = os.path.join(os.path.dirname(__file__), 'index.html')
    webbrowser.open(index_html)

if __name__ == '__main__':
    main()
uqfoundation-multiprocess-b3457a5/py3.9/doc/connection-objects.html000066400000000000000000000152041455552142400254250ustar00rootroot00000000000000 Connection objects
Prev         Up         Next

Connection objects

Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.

Connection objects are usually created using processing.Pipe() -- see also Listeners and Clients.

Connection objects have the following methods:

send(obj)

Send an object to the other end of the connection which should be read using recv().

The object must be picklable.

recv()
Return an object sent from the other end of the connection using send(). Raises EOFError if there is nothing left to receive and the other end was closed.
fileno()
Returns the file descriptor or handle used by the connection.
close()

Close the connection.

This is called automatically when the connection is garbage collected.

poll(timeout=0.0)

Return whether there is any data available to be read within timeout seconds.

If timeout is None then an infinite timeout is used.

Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C.

sendBytes(buffer)

Send byte data from an object supporting the buffer interface as a complete message.

Can be used to send strings or a view returned by buffer().

recvBytes()
Return a complete message of byte data sent from the other end of the connection as a string. Raises EOFError if there is nothing left to receive and the other end was closed.
recvBytesInto(buffer, offset=0)

Read into buffer at position offset a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises EOFError if there is nothing left to receive and the other end was closed.

buffer must be an object satisfying the writable buffer interface and offset must be non-negative and less than the length of buffer (in bytes).

If the buffer is too short then a BufferTooShort exception is raised and the complete message is available as e.args[0] where e is the exception instance.

For example:

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.sendBytes('thank you')
>>> a.recvBytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.sendBytes(arr1)
>>> count = b.recvBytesInto(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
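
Continuing the same session, poll() can be used to check whether data is waiting without blocking (a small sketch reusing the a, b pipe from above):

>>> b.poll()                      # nothing waiting to be read
False
>>> a.send('ping')
>>> b.poll(1.0)                   # True as soon as data is available
True
>>> b.recv()
'ping'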

Warning

The recv() method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message.

Therefore, unless the connection object was produced using Pipe() you should only use the recv() and send() methods after performing some sort of authentication. See Authentication keys.

Warning

If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie.

uqfoundation-multiprocess-b3457a5/py3.9/doc/connection-objects.txt000066400000000000000000000072761455552142400253120ustar00rootroot00000000000000.. include:: header.txt ==================== Connection objects ==================== Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects usually created using `processing.Pipe()` -- see also `Listener and Clients `_. Connection objects have the following methods: `send(obj)` Send an object to the other end of the connection which should be read using `recv()`. The object must be picklable. `recv()` Return an object sent from the other end of the connection using `send()`. Raises `EOFError` if there is nothing left to receive and the other end was closed. `fileno()` Returns the file descriptor or handle used by the connection. `close()` Close the connection. This is called automatically when the connection is garbage collected. `poll(timeout=0.0)` Return whether there is any data available to be read within `timeout` seconds. If `timeout` is `None` then an infinite timeout is used. Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C. `sendBytes(buffer)` Send byte data from an object supporting the buffer interface as a complete message. Can be used to send strings or a view returned by `buffer()`. `recvBytes()` Return a complete message of byte data sent from the other end of the connection as a string. Raises `EOFError` if there is nothing left to receive and the other end was closed. `recvBytesInto(buffer, offset=0)` Read into `buffer` at position `offset` a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises `EOFError` if there is nothing left to receive and the other end was closed. `buffer` must be an object satisfying the writable buffer interface and `offset` must be non-negative and less than the length of `buffer` (in bytes). If the buffer is too short then a `BufferTooShort` exception is raised and the complete message is available as `e.args[0]` where `e` is the exception instance. For example: >>> from processing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.sendBytes('thank you') >>> a.recvBytes() 'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.sendBytes(arr1) >>> count = b.recvBytesInto(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The `recv()` method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using `Pipe()` you should only use the `recv()` and `send()` methods after performing some sort of authentication. See `Authentication keys `_. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie. .. _Prev: queue-objects.html .. _Up: processing-ref.html .. _Next: manager-objects.html uqfoundation-multiprocess-b3457a5/py3.9/doc/connection-ref.html000066400000000000000000000357371455552142400245650ustar00rootroot00000000000000 Listeners and Clients
Prev         Up         Next

Listeners and Clients

Usually message passing between processes is done using queues or by using connection objects returned by Pipe().

However, the processing.connection module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for digest authentication using the hmac module from the standard library.

Classes and functions

The module defines the following functions:

Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)
Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections.
Client(address, family=None, authenticate=False, authkey=None)

Attempts to set up a connection to the listener which is using address address, returning a connection object.

The type of the connection is determined by family argument, but this can generally be omitted since it can usually be inferred from the format of address.

If authentication or authkey is a string then digest authentication is used. The key used for authentication will be either authkey or currentProcess.getAuthKey() if authkey is None. If authentication fails then AuthenticationError is raised. See Authentication keys.

The module exports two exception types:

exception AuthenticationError
Exception raised when there is an authentication error.
exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Listener objects

Instances of Listener have the following methods:

__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)
address
The address to be used by the bound socket or named pipe of the listener object.
family

The type of the socket (or named pipe) to use.

This can be one of the strings 'AF_INET' (for a TCP socket), 'AF_UNIX' (for a Unix domain socket) or 'AF_PIPE' (for a Windows named pipe). Of these only the first is guaranteed to be available.

If family is None then the family is inferred from the format of address. If address is also None then a default is chosen. This default is the family which is assumed to be the fastest available. See Address formats.

Note that if family is 'AF_UNIX' then the associated file will only be readable/writable by the user running the current process -- use os.chmod() if you need to let other users access the socket.

backlog
If the listener object uses a socket then backlog is passed to the listen() method of the socket once it has been bound.
authenticate
If authenticate is true or authkey is not None then digest authentication is used.
authkey

If authkey is a string then it will be used as the authentication key; otherwise it must be None.

If authkey is None and authenticate is true then currentProcess.getAuthKey() is used as the authentication key.

If authkey is None and authentication is false then no authentication is done.

If authentication fails then AuthenticationError is raised. See Authentication keys.

accept()

Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then AuthenticationError is raised.

Returns a connection object.

close()

Close the bound socket or named pipe of the listener object.

This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.

Listener objects have the following read-only properties:

address
The address which is being used by the listener object.
last_accepted

The address from which the last accepted connection came.

If this is unavailable then None is returned.

Address formats

  • An 'AF_INET' address is a tuple of the form (hostname, port) where hostname is a string and port is an integer

  • An 'AF_UNIX' address is a string representing a filename on the filesystem.

  • An 'AF_PIPE' address is a string of the form r'\\.\pipe\PipeName'.

    To use Client to connect to a named pipe on a remote computer called ServerName one should use an address of the form r'\\ServerName\pipe\PipeName' instead.

Note that any string beginning with two backslashes is assumed by default to be an 'AF_PIPE' address rather than an 'AF_UNIX' address.
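
A short sketch of the three formats (the socket path and pipe name below are made-up examples):

from processing.connection import Listener

listener = Listener(('localhost', 6000))       # family 'AF_INET' is inferred
print listener.address

# On Unix a filesystem path would give an 'AF_UNIX' listener:
#     Listener('/tmp/example-listener')
# On Windows a pipe name would give an 'AF_PIPE' listener:
#     Listener(r'\\.\pipe\ExamplePipe')

listener.close()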

Authentication keys

When one uses the recv() method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore Listener and Client use the hmac module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does not involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the return value of currentProcess().getAuthKey() is used (see Process objects). This value will automatically be inherited by any Process object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves.

Suitable authentication keys can also be generated by using os.urandom().

Example

The following server code creates a listener which uses 'secret password' as an authentication key. It then waits for a connection and sends some data to the client:

from processing.connection import Listener
from array import array

address = ('localhost', 6000)     # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')

conn = listener.accept()
print 'connection accepted from', listener.last_accepted

conn.send([2.25, None, 'junk', float])

conn.sendBytes('hello')

conn.sendBytes(array('i', [42, 1729]))

conn.close()
listener.close()

The following code connects to the server and receives some data from the server:

from processing.connection import Client
from array import array

address = ('localhost', 6000)
conn = Client(address, authkey='secret password')

print conn.recv()                 # => [2.25, None, 'junk', float]

print conn.recvBytes()            # => 'hello'

arr = array('i', [0, 0, 0, 0, 0])
print conn.recvBytesInto(arr)    # => 8
print arr                         # => array('i', [42, 1729, 0, 0, 0])

conn.close()
uqfoundation-multiprocess-b3457a5/py3.9/doc/connection-ref.txt000066400000000000000000000210001455552142400244120ustar00rootroot00000000000000.. include:: header.txt ======================= Listeners and Clients ======================= Usually message passing between processes is done using queues or by using connection objects returned by `Pipe()`. However, the `processing.connection` module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for *digest authentication* using the `hmac` module from the standard library. Classes and functions ===================== The module defines the following functions: `Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)` Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections. `Client(address, family=None, authenticate=False, authkey=None)` Attempts to set up a connection to the listener which is using address `address`, returning a `connection object `_. The type of the connection is determined by `family` argument, but this can generally be omitted since it can usually be inferred from the format of `address`. If `authentication` or `authkey` is a string then digest authentication is used. The key used for authentication will be either `authkey` or `currentProcess.getAuthKey()` if `authkey` is `None`. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. .. `deliverChallenge(connection, authkey)` Sends a randomly generated message to the other end of the connection and waits for a reply. If the reply matches the digest of the message using `authkey` as the key then a welcome message is sent to the other end of the connection. Otherwise `AuthenticationError` is raised. `answerChallenge(connection, authkey)` Receives a message, calculates the digest of the message using `authkey` as the key, and then sends the digest back. If a welcome message is not received then `AuthenticationError` is raised. The module exports two exception types: **exception** `AuthenticationError` Exception raised when there is an authentication error. **exception** `BufferTooShort` Exception raise by the `recvBytesInto()` method of a connection object when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Listener objects ================ Instances of `Listener` have the following methods: `__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)` `address` The address to be used by the bound socket or named pipe of the listener object. `family` The type of the socket (or named pipe) to use. This can be one of the strings `'AF_INET'` (for a TCP socket), `'AF_UNIX'` (for a Unix domain socket) or `'AF_PIPE'` (for a Windows named pipe). Of these only the first is guaranteed to be available. If `family` is `None` than the family is inferred from the format of `address`. If `address` is also `None` then a default is chosen. This default is the family which is assumed to be the fastest available. See `Address formats`_. Note that if `family` is `'AF_UNIX'` then the associated file will have only be readable/writable by the user running the current process -- use `os.chmod()` is you need to let other users access the socket. `backlog` If the listener object uses a socket then `backlog` is passed to the `listen()` method of the socket once it has been bound. 
`authenticate` If `authenticate` is true or `authkey` is not `None` then digest authentication is used. `authkey` If `authkey` is a string then it will be used as the authentication key; otherwise it must be `None`. If `authkey` is `None` and `authenticate` is true then `currentProcess.getAuthKey()` is used as the authentication key. If `authkey` is `None` and `authentication` is false then no authentication is done. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. `accept()` Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then `AuthenticationError` is raised. Returns a `connection object ` object. `close()` Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: `address` The address which is being used by the listener object. `last_accepted` The address from which the last accepted connection came. If this is unavailable then `None` is returned. Address formats =============== * An `'AF_INET'` address is a tuple of the form `(hostname, port)` where `hostname` is a string and `port` is an integer * An `'AF_UNIX'` address is a string representing a filename on the filesystem. * An `'AF_PIPE'` address is a string of the form `r'\\\\.\\pipe\\PipeName'`. To use `Client` to connect to a named pipe on a remote computer called `ServerName` one should use an address of the form `r'\\\\ServerName\\pipe\\PipeName'` instead. Note that any string beginning with two backslashes is assumed by default to be an `'AF_PIPE'` address rather than an `'AF_UNIX'` address. Authentication keys =================== When one uses the `recv()` method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore `Listener` and `Client` use the `hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does *not* involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of `currentProcess().getAuthKey()` is used (see `Process objects `_). This value will automatically inherited by any `Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between the themselves. Suitable authentication keys can also be generated by using `os.urandom()`. Example ======= The following server code creates a listener which uses `'secret password'` as an authentication key. 
It then waits for a connection and sends some data to the client:: from processing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey='secret password') conn = listener.accept() print 'connection accepted from', listener.last_accepted conn.send([2.25, None, 'junk', float]) conn.sendBytes('hello') conn.sendBytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from processing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey='secret password') print conn.recv() # => [2.25, None, 'junk', float] print conn.recvBytes() # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print conn.recvBytesInto(arr) # => 8 print arr # => array('i', [42, 1729, 0, 0, 0]) conn.close() .. _Prev: sharedctypes.html .. _Up: processing-ref.html .. _Next: programming-guidelines.html uqfoundation-multiprocess-b3457a5/py3.9/doc/header.txt000066400000000000000000000003401455552142400227350ustar00rootroot00000000000000.. default-role:: literal .. header:: Prev_ |spaces| Up_ |spaces| Next_ .. footer:: Prev_ |spaces| Up_ |spaces| Next_ .. |nbsp| unicode:: U+000A0 .. |spaces| replace:: |nbsp| |nbsp| |nbsp| |nbsp| uqfoundation-multiprocess-b3457a5/py3.9/doc/html4css1.css000066400000000000000000000126361455552142400233130ustar00rootroot00000000000000/* :Author: David Goodger :Contact: goodger@users.sourceforge.net :Date: $Date: 2008/01/29 22:14:02 $ :Revision: $Revision: 1.1.1.1 $ :Copyright: This stylesheet has been placed in the public domain. Default cascading style sheet for the HTML output of Docutils. See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to customize this style sheet. */ /* used to remove borders from tables and images */ .borderless, table.borderless td, table.borderless th { border: 0 } table.borderless td, table.borderless th { /* Override padding for "table.docutils td" with "! important". The right padding separates the table cells. */ padding: 0 0.5em 0 0 ! important } .first { /* Override more specific margin styles with "! important". */ margin-top: 0 ! important } .last, .with-subtitle { margin-bottom: 0 ! important } .hidden { display: none } a.toc-backref { text-decoration: none ; color: black } blockquote.epigraph { margin: 2em 5em ; } dl.docutils dd { margin-bottom: 0.5em } /* Uncomment (and remove this text!) to get bold-faced definition list terms dl.docutils dt { font-weight: bold } */ div.abstract { margin: 2em 5em } div.abstract p.topic-title { font-weight: bold ; text-align: center } div.admonition, div.attention, div.caution, div.danger, div.error, div.hint, div.important, div.note, div.tip, div.warning { margin: 2em ; border: medium outset ; padding: 1em } div.admonition p.admonition-title, div.hint p.admonition-title, div.important p.admonition-title, div.note p.admonition-title, div.tip p.admonition-title { font-weight: bold ; font-family: sans-serif } div.attention p.admonition-title, div.caution p.admonition-title, div.danger p.admonition-title, div.error p.admonition-title, div.warning p.admonition-title { color: red ; font-weight: bold ; font-family: sans-serif } /* Uncomment (and remove this text!) to get reduced vertical space in compound paragraphs. 
div.compound .compound-first, div.compound .compound-middle { margin-bottom: 0.5em } div.compound .compound-last, div.compound .compound-middle { margin-top: 0.5em } */ div.dedication { margin: 2em 5em ; text-align: center ; font-style: italic } div.dedication p.topic-title { font-weight: bold ; font-style: normal } div.figure { margin-left: 2em ; margin-right: 2em } div.footer, div.header { clear: both; font-size: smaller } div.line-block { display: block ; margin-top: 1em ; margin-bottom: 1em } div.line-block div.line-block { margin-top: 0 ; margin-bottom: 0 ; margin-left: 1.5em } div.sidebar { margin-left: 1em ; border: medium outset ; padding: 1em ; background-color: #ffffee ; width: 40% ; float: right ; clear: right } div.sidebar p.rubric { font-family: sans-serif ; font-size: medium } div.system-messages { margin: 5em } div.system-messages h1 { color: red } div.system-message { border: medium outset ; padding: 1em } div.system-message p.system-message-title { color: red ; font-weight: bold } div.topic { margin: 2em } h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { margin-top: 0.4em } h1.title { text-align: center } h2.subtitle { text-align: center } hr.docutils { width: 75% } img.align-left { clear: left } img.align-right { clear: right } ol.simple, ul.simple { margin-bottom: 1em } ol.arabic { list-style: decimal } ol.loweralpha { list-style: lower-alpha } ol.upperalpha { list-style: upper-alpha } ol.lowerroman { list-style: lower-roman } ol.upperroman { list-style: upper-roman } p.attribution { text-align: right ; margin-left: 50% } p.caption { font-style: italic } p.credits { font-style: italic ; font-size: smaller } p.label { white-space: nowrap } p.rubric { font-weight: bold ; font-size: larger ; color: maroon ; text-align: center } p.sidebar-title { font-family: sans-serif ; font-weight: bold ; font-size: larger } p.sidebar-subtitle { font-family: sans-serif ; font-weight: bold } p.topic-title { font-weight: bold } pre.address { margin-bottom: 0 ; margin-top: 0 ; font-family: serif ; font-size: 100% } pre.literal-block, pre.doctest-block { margin-left: 2em ; margin-right: 2em ; background-color: #eeeeee } span.classifier { font-family: sans-serif ; font-style: oblique } span.classifier-delimiter { font-family: sans-serif ; font-weight: bold } span.interpreted { font-family: sans-serif } span.option { white-space: nowrap } span.pre { white-space: pre } span.problematic { color: red } span.section-subtitle { /* font-size relative to parent (h1..h6 element) */ font-size: 80% } table.citation { border-left: solid 1px gray; margin-left: 1px } table.docinfo { margin: 2em 4em } table.docutils { margin-top: 0.5em ; margin-bottom: 0.5em } table.footnote { border-left: solid 1px black; margin-left: 1px } table.docutils td, table.docutils th, table.docinfo td, table.docinfo th { padding-left: 0.5em ; padding-right: 0.5em ; vertical-align: top } table.docutils th.field-name, table.docinfo th.docinfo-name { font-weight: bold ; text-align: left ; white-space: nowrap ; padding-left: 0 } h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { font-size: 100% } /* tt.docutils { background-color: #eeeeee } */ ul.auto-toc { list-style-type: none } uqfoundation-multiprocess-b3457a5/py3.9/doc/index.html000066400000000000000000000064761455552142400227610ustar00rootroot00000000000000 Documentation for processing-0.52
Prev         Up         Next
uqfoundation-multiprocess-b3457a5/py3.9/doc/index.txt000066400000000000000000000021751455552142400226240ustar00rootroot00000000000000.. include:: header.txt .. include:: version.txt ======================================== Documentation for processing-|version| ======================================== :Author: R Oudkerk :Contact: roudkerk at users.berlios.de :Url: http://developer.berlios.de/projects/pyprocessing :Licence: BSD Licence Contents ======== * `Introduction `_ * `Package reference `_ + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes objects `_ + `Listeners and Clients `_ * `Programming guidelines `_ * `Tests and examples `_ See also ======== * `Installation instructions `_ * `Changelog `_ * `Acknowledgments `_ * `Licence `_ .. _Next: intro.html .. _Up: index.html .. _Prev: index.html uqfoundation-multiprocess-b3457a5/py3.9/doc/intro.html000066400000000000000000000427461455552142400230050ustar00rootroot00000000000000 Introduction
Prev         Up         Next

Introduction

Threads, processes and the GIL

To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads.

Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient.

On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other.

CPython has a Global Interpreter Lock (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C.

One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead.

Python gives little direct support for writing programs using multiple processes. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs.

Forking and spawning

There are two ways of creating a new process in Python:

  • The current process can fork a new child process by using the os.fork() function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits copies of all variables that the parent process had.

    However, os.fork() is not available on every platform: in particular Windows does not support it.

  • Alternatively, the current process can spawn a completely new Python interpreter by using the subprocess module or one of the os.spawn*() functions.

    Getting this new interpreter into a fit state to perform the task set for it by its parent process is, however, a bit of a challenge.

The processing package uses os.fork() if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process.
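
For illustration, a minimal sketch of what forking looks like at the operating-system level (standard library only, and Unix only, since Windows lacks os.fork()); the processing package hides these details behind the Process class:

import os

pid = os.fork()                     # 0 in the child, the child's pid in the parent
if pid == 0:
    print 'child, pid =', os.getpid()
    os._exit(0)                     # leave the child without running cleanup handlers
else:
    os.waitpid(pid, 0)              # the parent waits for the child to finish
    print 'parent, pid =', os.getpid()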

The Process class

In the processing package processes are spawned by creating a Process object and then calling its start() method. processing.Process follows the API of threading.Thread. A trivial example of a multiprocess program is

from processing import Process

def f(name):
    print 'hello', name

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

Here the function f is run in a child process.

For an explanation of why (on Windows) the if __name__ == '__main__' part is necessary see Programming guidelines.
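
Because processing.Process follows the threading.Thread API, one can also subclass Process and override run(); a minimal sketch in the same style as the example above (the class name is purely illustrative):

from processing import Process

class Greeter(Process):
    def __init__(self, whom):
        Process.__init__(self)      # always invoke the base constructor first
        self.whom = whom
    def run(self):
        print 'hello', self.whom

if __name__ == '__main__':
    p = Greeter('bob')
    p.start()                       # run() is invoked in a child process
    p.join()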

Exchanging objects between processes

processing supports two types of communication channel between processes:

Queues:

The function Queue() returns a near clone of Queue.Queue -- see the Python standard documentation. For example

from processing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()

Queues are thread and process safe. See Queues.
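
Since queues are process safe, several processes can put items onto the same queue; a small sketch, assuming only the API shown above:

from processing import Process, Queue

def producer(q, i):
    q.put('item from process %d' % i)

if __name__ == '__main__':
    q = Queue()
    workers = [Process(target=producer, args=(q, i)) for i in range(3)]
    for w in workers:
        w.start()
    for w in workers:
        print q.get()               # arrival order is not guaranteed
    for w in workers:
        w.join()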

Pipes:

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example

from processing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print parent_conn.recv()   # prints "[42, None, 'hello']"
    p.join()

The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See Pipes.

Synchronization between processes

processing contains equivalents of all the synchronization primitives from threading. For instance one can use a lock to ensure that only one process prints to standard output at a time:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()

    for num in range(10):
        Process(target=f, args=(lock, num)).start()

Without using the lock output from the different processes is liable to get all mixed up.
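
Locks are not the only primitive available; for example an Event (also a clone of its threading counterpart) lets one process wait for a signal from another. A minimal sketch, assuming the API described in the package reference:

from processing import Process, Event

def waiter(e):
    e.wait()                        # blocks until the parent calls e.set()
    print 'event was set'

if __name__ == '__main__':
    e = Event()
    p = Process(target=waiter, args=(e,))
    p.start()
    e.set()
    p.join()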

Sharing state between processes

As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes.

However, if you really do need to use some shared data then processing provides a couple of ways of doing so.

Shared memory:

Data can be stored in a shared memory map using Value or Array. For example the following code

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

will print

3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

The 'd' and 'i' arguments used when creating num and arr are typecodes of the kind used by the array module: 'd' indicates a double precision float and 'i' indicates a signed integer. These shared objects will be process and thread safe.

For more flexibility in using shared memory one can use the processing.sharedctypes module which supports the creation of arbitrary ctypes objects allocated from shared memory.

Server process:

A manager object returned by Manager() controls a server process which holds python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue, Value and Array. For example:

from processing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    print d
    print l

will print

{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Creating managers which support other types is not hard --- see Customized managers.

Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See Server process managers.

Using a pool of workers

The Pool() function returns an object representing a pool of worker processes. It has methods which allow tasks to be offloaded to the worker processes in a few different ways.

For example:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes
    result = pool.applyAsync(f, [10])     # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow
    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

See Process pools.

Speed

The following benchmarks were performed on a single core Pentium 4, 2.5GHz laptop running Windows XP and Ubuntu Linux 6.10 --- see benchmarks.py.

Number of 256 byte string objects passed between processes/threads per sec:

Connection type              Windows    Linux
Queue.Queue                   49,000    17,000-50,000 [1]
processing.Queue              22,000    21,000
Queue managed by server        6,900     6,500
processing.Pipe               52,000    57,000

[1] For some reason the performance of Queue.Queue is very variable on Linux.

Number of acquires/releases of a lock per sec:

Lock type                    Windows    Linux
threading.Lock               850,000    560,000
processing.Lock              420,000    510,000
Lock managed by server        10,000      8,400
threading.RLock               93,000     76,000
processing.RLock             420,000    500,000
RLock managed by server        8,800      7,400

Number of interleaved waits/notifies per sec on a condition variable by two processes:

Condition type                Windows    Linux
threading.Condition            27,000    31,000
processing.Condition           26,000    25,000
Condition managed by server     6,600     6,000

Number of integers retrieved from a sequence per sec:

Sequence type                  Windows      Linux
list                         6,400,000    5,100,000
unsynchronized shared array  3,900,000    3,100,000
synchronized shared array      200,000      220,000
list managed by server          20,000       17,000
uqfoundation-multiprocess-b3457a5/py3.9/doc/intro.txt000066400000000000000000000301551455552142400226470ustar00rootroot00000000000000.. include:: header.txt ============== Introduction ============== Threads, processes and the GIL ============================== To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads. Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient. On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other. CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C. One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead. Python gives little direct support for writing programs using multiple process. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs. Forking and spawning ==================== There are two ways of creating a new process in Python: * The current process can *fork* a new child process by using the `os.fork()` function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits *copies* of all variables that the parent process had. However, `os.fork()` is not available on every platform: in particular Windows does not support it. * Alternatively, the current process can spawn a completely new Python interpreter by using the `subprocess` module or one of the `os.spawn*()` functions. Getting this new interpreter in to a fit state to perform the task set for it by its parent process is, however, a bit of a challenge. The `processing` package uses `os.fork()` if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process. The Process class ================= In the `processing` package processes are spawned by creating a `Process` object and then calling its `start()` method. `processing.Process` follows the API of `threading.Thread`. A trivial example of a multiprocess program is :: from processing import Process def f(name): print 'hello', name if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() Here the function `f` is run in a child process. For an explanation of why (on Windows) the `if __name__ == '__main__'` part is necessary see `Programming guidelines `_. 
Exchanging objects between processes ==================================== `processing` supports two types of communication channel between processes: **Queues**: The function `Queue()` returns a near clone of `Queue.Queue` -- see the Python standard documentation. For example :: from processing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print q.get() # prints "[42, None, 'hello']" p.join() Queues are thread and process safe. See `Queues `_. **Pipes**: The `Pipe()` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example :: from processing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print parent_conn.recv() # prints "[42, None, 'hello']" p.join() The two connection objects returned by `Pipe()` represent the two ends of the pipe. Each connection object has `send()` and `recv()` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See `Pipes `_. Synchronization between processes ================================= `processing` contains equivalents of all the synchronization primitives from `threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from processing import Process, Lock def f(l, i): l.acquire() print 'hello world', i l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes =============================== As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then `processing` provides a couple of ways of doing so. **Shared memory**: Data can be stored in a shared memory map using `Value` or `Array`. For example the following code :: from processing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print num.value print arr[:] will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The `'d'` and `'i'` arguments used when creating `num` and `arr` are typecodes of the kind used by the `array` module: `'d'` indicates a double precision float and `'i'` inidicates a signed integer. These shared objects will be process and thread safe. For more flexibility in using shared memory one can use the `processing.sharedctypes` module which supports the creation of arbitrary `ctypes objects allocated from shared memory `_. **Server process**: A manager object returned by `Manager()` controls a server process which holds python objects and allows other processes to manipulate them using proxies. A manager returned by `Manager()` will support types `list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value` and `Array`. 
For example:: from processing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print d print l will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Creating managers which support other types is not hard --- see `Customized managers `_. Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See `Server process managers `_. Using a pool of workers ======================= The `Pool()` function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" See `Process pools `_. Speed ===== The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see `benchmarks.py <../examples/benchmarks.py>`_. *Number of 256 byte string objects passed between processes/threads per sec*: ================================== ========== ================== Connection type Windows Linux ================================== ========== ================== Queue.Queue 49,000 17,000-50,000 [1]_ processing.Queue 22,000 21,000 Queue managed by server 6,900 6,500 processing.Pipe 52,000 57,000 ================================== ========== ================== .. [1] For some reason the performance of `Queue.Queue` is very variable on Linux. *Number of acquires/releases of a lock per sec*: ============================== ========== ========== Lock type Windows Linux ============================== ========== ========== threading.Lock 850,000 560,000 processing.Lock 420,000 510,000 Lock managed by server 10,000 8,400 threading.RLock 93,000 76,000 processing.RLock 420,000 500,000 RLock managed by server 8,800 7,400 ============================== ========== ========== *Number of interleaved waits/notifies per sec on a condition variable by two processes*: ============================== ========== ========== Condition type Windows Linux ============================== ========== ========== threading.Condition 27,000 31,000 processing.Condition 26,000 25,000 Condition managed by server 6,600 6,000 ============================== ========== ========== *Number of integers retrieved from a sequence per sec*: ============================== ========== ========== Sequence type Windows Linux ============================== ========== ========== list 6,400,000 5,100,000 unsynchornized shared array 3,900,000 3,100,000 synchronized shared array 200,000 220,000 list managed by server 20,000 17,000 ============================== ========== ========== .. _Prev: index.html .. _Up: index.html .. _Next: processing-ref.html uqfoundation-multiprocess-b3457a5/py3.9/doc/manager-objects.html000066400000000000000000000440461455552142400247060ustar00rootroot00000000000000 Manager objects
Prev         Up         Next

Manager objects

A manager object controls a server process which manages shared objects. Other processes can access the shared objects by using proxies.

Manager processes will be shut down as soon as they are garbage collected or their parent process exits. The manager classes are defined in the processing.managers module.

BaseManager

BaseManager is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects.

The public methods of BaseManager are the following:

__init__(self, address=None, authkey=None)

Creates a manager object.

Once created one should call start() or serveForever() to ensure that the manager object refers to a started manager process.

The arguments to the constructor are as follows:

address

The address on which the manager process listens for new connections. If address is None then an arbitrary one is chosen.

See Listener objects.

authkey

The authentication key which will be used to check the validity of incoming connections to the server process.

If authkey is None then currentProcess().getAuthKey() is used. Otherwise authkey is used and it must be a string.

See Authentication keys.

start()
Spawn or fork a subprocess to start the manager.
serveForever()
Start the manager in the current process. See Using a remote manager.
fromAddress(address, authkey)
A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See Using a remote manager.
shutdown()

Stop the process used by the manager. This is only available if start() has been used to start the server process.

This can be called multiple times.

BaseManager instances also have one read-only property:

address
The address used by the manager.

The creation of managers which support arbitrary types is discussed below in Customized managers.

SyncManager

SyncManager is a subclass of BaseManager which can be used for the synchronization of processes. Objects of this type are returned by processing.Manager().

It also supports creation of shared lists and dictionaries. The instance methods defined by SyncManager are

BoundedSemaphore(value=1)
Creates a shared threading.BoundedSemaphore object and returns a proxy for it.
Condition(lock=None)

Creates a shared threading.Condition object and returns a proxy for it.

If lock is supplied then it should be a proxy for a threading.Lock or threading.RLock object.

Event()
Creates a shared threading.Event object and returns a proxy for it.
Lock()
Creates a shared threading.Lock object and returns a proxy for it.
Namespace()

Creates a shared Namespace object and returns a proxy for it.

See Namespace objects.

Queue(maxsize=0)
Creates a shared Queue.Queue object and returns a proxy for it.
RLock()
Creates a shared threading.RLock object and returns a proxy for it.
Semaphore(value=1)
Creates a shared threading.Semaphore object and returns a proxy for it.
Array(typecode, sequence)
Creates an array and returns a proxy for it. (format is ignored.)
Value(typecode, value)
Creates an object with a writable value attribute and returns a proxy for it.
dict(), dict(mapping), dict(sequence)
Creates a shared dict object and returns a proxy for it.
list(), list(sequence)
Creates a shared list object and returns a proxy for it.
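
A short sketch showing a few of these methods together; the proxies behave much like the objects they stand in for (the names used here are illustrative only):

from processing import Process, Manager

def f(ns, q, lock):
    lock.acquire()
    ns.count = ns.count + 1
    lock.release()
    q.put('done')

if __name__ == '__main__':
    manager = Manager()
    ns = manager.Namespace()
    ns.count = 0
    q = manager.Queue()
    lock = manager.Lock()

    p = Process(target=f, args=(ns, q, lock))
    p.start()
    print q.get()                   # prints "done"
    print ns.count                  # prints "1"
    p.join()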

Namespace objects

A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes.

However, when using a proxy for a namespace object, an attribute beginning with '_' will be an attribute of the proxy and not an attribute of the referent:

>>> manager = processing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3    # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')

Customized managers

To create one's own manager one creates a subclass of BaseManager.

To create a method of the subclass which will create new shared objects one uses the following function:

CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)

Returns a function with signature func(self, *args, **kwds) which will create a shared object using the manager self and return a proxy for it.

The shared objects will be created by evaluating callable(*args, **kwds) in the manager process.

The arguments are:

callable
The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored.
proxytype

The type of proxy which will be used for object returned by callable.

If proxytype is None then each time an object is returned by callable either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the exposed argument, see below.

exposed

Given a shared object returned by callable, the exposed argument is the list of those method names which should be exposed via BaseProxy._callMethod(). [1] [2]

If exposed is None and callable.__exposed__ exists then callable.__exposed__ is used instead.

If exposed is None and callable.__exposed__ does not exist then all methods of the shared object which do not start with '_' will be exposed.

An attempt to use BaseProxy._callMethod() with a method name which is not exposed will raise an exception.

typeid
If typeid is a string then it is used as an identifier for the callable. Otherwise, typeid must be None and a string prefixed by callable.__name__ is used as the identifier.
[1]A method here means any attribute which has a __call__ attribute.
[2]

The method names __repr__, __str__, and __cmp__ of a shared object are always exposed by the manager. However, instead of invoking the __repr__(), __str__(), __cmp__() instance methods (none of which are guaranteed to exist) they invoke the builtin functions repr(), str() and cmp().

Note that one should generally avoid exposing rich comparison methods like __eq__(), __ne__(), __le__(). To make the proxy type support comparison by value one can just expose __cmp__() instead (even if the referent does not have such a method).

Example

from processing.managers import BaseManager, CreatorMethod

class FooClass(object):
    def bar(self):
        print 'BAR'
    def baz(self):
        print 'BAZ'

class NewManager(BaseManager):
    Foo = CreatorMethod(FooClass)

if __name__ == '__main__':
    manager = NewManager()
    manager.start()
    foo = manager.Foo()
    foo.bar()               # prints 'BAR'
    foo.baz()               # prints 'BAZ'
    manager.shutdown()

See ex_newtype.py for more examples.

Using a remote manager

It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it).

Running the following commands creates a server for a shared queue which remote clients can use:

>>> from processing.managers import BaseManager, CreatorMethod
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy')
...
>>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none')
>>> m.serveForever()

One client can access the server as follows:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.put('hello')

Another client can also use it:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.get()
'hello'
uqfoundation-multiprocess-b3457a5/py3.9/doc/manager-objects.txt000066400000000000000000000235161455552142400245600ustar00rootroot00000000000000.. include:: header.txt ================= Manager objects ================= A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the `processing.managers` module. BaseManager =========== `BaseManager` is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects. The public methods of `BaseManager` are the following: `__init__(self, address=None, authkey=None)` Creates a manager object. Once created one should call `start()` or `serveForever()` to ensure that the manager object refers to a started manager process. The arguments to the constructor are as follows: `address` The address on which the manager process listens for new connections. If `address` is `None` then an arbitrary one is chosen. See `Listener objects `_. `authkey` The authentication key which will be used to check the validity of incoming connections to the server process. If `authkey` is `None` then `currentProcess().getAuthKey()`. Otherwise `authkey` is used and it must be a string. See `Authentication keys `_. `start()` Spawn or fork a subprocess to start the manager. `serveForever()` Start the manager in the current process. See `Using a remote manager`_. `fromAddress(address, authkey)` A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See `Using a remote manager`_. `shutdown()` Stop the process used by the manager. This is only available if `start()` has been used to start the server process. This can be called multiple times. `BaseManager` instances also have one read-only property: `address` The address used by the manager. The creation of managers which support arbitrary types is discussed below in `Customized managers`_. SyncManager =========== `SyncManager` is a subclass of `BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by `processing.Manager()`. It also supports creation of shared lists and dictionaries. The instance methods defined by `SyncManager` are `BoundedSemaphore(value=1)` Creates a shared `threading.BoundedSemaphore` object and returns a proxy for it. `Condition(lock=None)` Creates a shared `threading.Condition` object and returns a proxy for it. If `lock` is supplied then it should be a proxy for a `threading.Lock` or `threading.RLock` object. `Event()` Creates a shared `threading.Event` object and returns a proxy for it. `Lock()` Creates a shared `threading.Lock` object and returns a proxy for it. `Namespace()` Creates a shared `Namespace` object and returns a proxy for it. See `Namespace objects`_. `Queue(maxsize=0)` Creates a shared `Queue.Queue` object and returns a proxy for it. `RLock()` Creates a shared `threading.RLock` object and returns a proxy for it. `Semaphore(value=1)` Creates a shared `threading.Semaphore` object and returns a proxy for it. `Array(typecode, sequence)` Create an array and returns a proxy for it. (`format` is ignored.) `Value(typecode, value)` Create an object with a writable `value` attribute and returns a proxy for it. 
`dict()`, `dict(mapping)`, `dict(sequence)` Creates a shared `dict` object and returns a proxy for it. `list()`, `list(sequence)` Creates a shared `list` object and returns a proxy for it. Namespace objects ----------------- A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with `'_'` will be an attribute of the proxy and not an attribute of the referent:: >>> manager = processing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print Global Namespace(x=10, y='hello') Customized managers =================== To create one's own manager one creates a subclass of `BaseManager`. To create a method of the subclass which will create new shared objects one uses the following function: `CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)` Returns a function with signature `func(self, *args, **kwds)` which will create a shared object using the manager `self` and return a proxy for it. The shared objects will be created by evaluating `callable(*args, **kwds)` in the manager process. The arguments are: `callable` The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored. `proxytype` The type of proxy which will be used for object returned by `callable`. If `proxytype` is `None` then each time an object is returned by `callable` either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the `exposed` argument, see below. `exposed` Given a shared object returned by `callable`, the `exposed` argument is the list of those method names which should be exposed via |callmethod|_. [#]_ [#]_ If `exposed` is `None` and `callable.__exposed__` exists then `callable.__exposed__` is used instead. If `exposed` is `None` and `callable.__exposed__` does not exist then all methods of the shared object which do not start with `'_'` will be exposed. An attempt to use |callmethod| with a method name which is not exposed will raise an exception. `typeid` If `typeid` is a string then it is used as an identifier for the callable. Otherwise, `typeid` must be `None` and a string prefixed by `callable.__name__` is used as the identifier. .. |callmethod| replace:: ``BaseProxy._callMethod()`` .. _callmethod: proxy-objects.html#methods-of-baseproxy .. [#] A method here means any attribute which has a `__call__` attribute. .. [#] The method names `__repr__`, `__str__`, and `__cmp__` of a shared object are always exposed by the manager. However, instead of invoking the `__repr__()`, `__str__()`, `__cmp__()` instance methods (none of which are guaranteed to exist) they invoke the builtin functions `repr()`, `str()` and `cmp()`. Note that one should generally avoid exposing rich comparison methods like `__eq__()`, `__ne__()`, `__le__()`. To make the proxy type support comparison by value one can just expose `__cmp__()` instead (even if the referent does not have such a method). 
Example ------- :: from processing.managers import BaseManager, CreatorMethod class FooClass(object): def bar(self): print 'BAR' def baz(self): print 'BAZ' class NewManager(BaseManager): Foo = CreatorMethod(FooClass) if __name__ == '__main__': manager = NewManager() manager.start() foo = manager.Foo() foo.bar() # prints 'BAR' foo.baz() # prints 'BAZ' manager.shutdown() See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples. Using a remote manager ====================== It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a shared queue which remote clients can use:: >>> from processing.managers import BaseManager, CreatorMethod >>> import Queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy') ... >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none') >>> m.serveForever() One client can access the server as follows:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.put('hello') Another client can also use it:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.get() 'hello' .. _Prev: connection-objects.html .. _Up: processing-ref.html .. _Next: proxy-objects.html uqfoundation-multiprocess-b3457a5/py3.9/doc/pool-objects.html000066400000000000000000000265511455552142400242460ustar00rootroot00000000000000 Process Pools
Prev         Up         Next

Process Pools

The processing.pool module has one public class:

class Pool(processes=None, initializer=None, initargs=())

A class representing a pool of worker processes.

Tasks can be offloaded to the pool and the results dealt with when they become available.

Note that tasks can only be submitted (or retrieved) by the process which created the pool object.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

Pool objects

Pool has the following public methods:

__init__(processes=None)
The constructor creates and starts processes worker processes. If processes is None then cpuCount() is used to find a default or 1 if cpuCount() raises NotImplemented.
apply(func, args=(), kwds={})
Equivalent of the apply() builtin function. It blocks till the result is ready.
applyAsync(func, args=(), kwds={}, callback=None)

A variant of the apply() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

map(func, iterable, chunksize=None)

A parallel equivalent of the map() builtin function. It blocks till the result is ready.

This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.

mapAsync(func, iterable, chunksize=None, callback=None)

A variant of the map() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

imap(func, iterable, chunksize=1)

An equivalent of itertools.imap().

The chunksize argument is the same as the one used by the map() method. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1.

Also if chunksize is 1 then the next() method of the iterator returned by the imap() method has an optional timeout parameter: next(timeout) will raise processing.TimeoutError if the result cannot be returned within timeout seconds.

imapUnordered(func, iterable, chunksize=1)
The same as imap() except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".)
close()
Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
terminate()
Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected terminate() will be called immediately.
join()
Wait for the worker processes to exit. One must call close() or terminate() before using join().

Asynchronous result objects

The result objects returned by applyAsync() and mapAsync() have the following public methods:

get(timeout=None)
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then processing.TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
wait(timeout=None)
Waits until the result is available or until timeout seconds pass.
ready()
Returns whether the call has completed.
successful()
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.

Examples

The following example demonstrates the use of a pool:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes

    result = pool.applyAsync(f, (10,))    # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow

    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

    it = pool.imap(f, range(10))
    print it.next()                       # prints "0"
    print it.next()                       # prints "1"
    print it.next(timeout=1)              # prints "4" unless your computer is *very* slow

    import time
    result = pool.applyAsync(time.sleep, (10,))
    print result.get(timeout=1)           # raises `TimeoutError`

See also ex_pool.py.
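
The following sketch shows the callback form of applyAsync() described above; the callback runs in the thread which handles results, so it must return quickly:

from processing import Pool

def f(x):
    return x*x

results = []

def collect(value):
    results.append(value)                 # keep the callback short

if __name__ == '__main__':
    pool = Pool(processes=2)
    for i in range(5):
        pool.applyAsync(f, (i,), callback=collect)
    pool.close()
    pool.join()                           # wait until all tasks have completed
    print sorted(results)                 # prints "[0, 1, 4, 9, 16]"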

uqfoundation-multiprocess-b3457a5/py3.9/doc/pool-objects.txt000066400000000000000000000136411455552142400241150ustar00rootroot00000000000000.. include:: header.txt =============== Process Pools =============== The `processing.pool` module has one public class: **class** `Pool(processes=None, initializer=None, initargs=())` A class representing a pool of worker processes. Tasks can be offloaded to the pool and the results dealt with when they become available. Note that tasks can only be submitted (or retrieved) by the process which created the pool object. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. Pool objects ============ `Pool` has the following public methods: `__init__(processes=None)` The constructor creates and starts `processes` worker processes. If `processes` is `None` then `cpuCount()` is used to find a default or 1 if `cpuCount()` raises `NotImplemented`. `apply(func, args=(), kwds={})` Equivalent of the `apply()` builtin function. It blocks till the result is ready. `applyAsync(func, args=(), kwds={}, callback=None)` A variant of the `apply()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `map(func, iterable, chunksize=None)` A parallel equivalent of the `map()` builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting `chunksize` to a positive integer. `mapAsync(func, iterable, chunksize=None, callback=None)` A variant of the `map()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `imap(func, iterable, chunksize=1)` An equivalent of `itertools.imap()`. The `chunksize` argument is the same as the one used by the `map()` method. For very long iterables using a large value for `chunksize` can make make the job complete **much** faster than using the default value of `1`. Also if `chunksize` is `1` then the `next()` method of the iterator returned by the `imap()` method has an optional `timeout` parameter: `next(timeout)` will raise `processing.TimeoutError` if the result cannot be returned within `timeout` seconds. `imapUnordered(func, iterable, chunksize=1)` The same as `imap()` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) `close()` Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. `terminate()` Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected `terminate()` will be called immediately. 
`join()` Wait for the worker processes to exit. One must call `close()` or `terminate()` before using `join()`. Asynchronous result objects =========================== The result objects returns by `applyAsync()` and `mapAsync()` have the following public methods: `get(timeout=None)` Returns the result when it arrives. If `timeout` is not `None` and the result does not arrive within `timeout` seconds then `processing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by `get()`. `wait(timeout=None)` Waits until the result is available or until `timeout` seconds pass. `ready()` Returns whether the call has completed. `successful()` Returns whether the call completed without raising an exception. Will raise `AssertionError` if the result is not ready. Examples ======== The following example demonstrates the use of a pool:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, (10,)) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print it.next() # prints "0" print it.next() # prints "1" print it.next(timeout=1) # prints "4" unless your computer is *very* slow import time result = pool.applyAsync(time.sleep, (10,)) print result.get(timeout=1) # raises `TimeoutError` See also `ex_pool.py <../examples/ex_pool.py>`_. .. _Prev: proxy-objects.html .. _Up: processing-ref.html .. _Next: sharedctypes.html uqfoundation-multiprocess-b3457a5/py3.9/doc/process-objects.html000066400000000000000000000235741455552142400247550ustar00rootroot00000000000000 Process objects
Prev         Up         Next

Process objects

Process objects represent activity that is run in a separate process.

Process

The Process class has equivalents of all the methods of threading.Thread:

__init__(group=None, target=None, name=None, args=(), kwargs={})

This constructor should always be called with keyword arguments. Arguments are:

group
should be None; exists for compatibility with threading.Thread.
target
is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name
is the process name. By default, a unique name is constructed of the form 'Process-N1:N2:...:Nk' where N1,N2,...,Nk is a sequence of integers whose length is determined by the generation of the process.
args
is the argument tuple for the target invocation. Defaults to ().
kwargs
is a dictionary of keyword arguments for the target invocation. Defaults to {}.

If a subclass overrides the constructor, it must make sure it invokes the base class constructor (Process.__init__()) before doing anything else to the process.

run()

Method representing the process's activity.

You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.

start()

Start the process's activity.

This must be called at most once per process object. It arranges for the object's run() method to be invoked in a separate process.

join(timeout=None)

This blocks the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.

If timeout is None then there is no timeout.

A process can be joined many times.

A process cannot join itself because this would cause a deadlock.

It is an error to attempt to join a process before it has been started.

getName()
Return the process's name.
setName(name)

Set the process's name.

The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor.

isAlive()

Return whether the process is alive.

Roughly, a process object is alive from the moment the start() method returns until the child process terminates.

isDaemon()
Return the process's daemon flag.
setDaemon(daemonic)

Set the process's daemon flag to the Boolean value daemonic. This must be called before start() is called.

The initial value is inherited from the creating process.

When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes.

In addition process objects also support the following methods.

getPid()
Return the process ID. Before the process is spawned this will be None.
getExitCode()
Return the child's exit code. This will be None if the process has not yet terminated. A negative value -N indicates that the child was terminated by signal N.
getAuthKey()

Return the process's authentication key (a string).

When the processing package is initialized the main process is assigned a random hexadecimal string.

When a Process object is created it will inherit the authentication key of its parent process, although this may be changed using setAuthKey() below.

See Authentication Keys.

setAuthKey(authkey)
Set the process's authentication key which must be a string.
terminate()

Terminate the process. On Unix this is done using the SIGTERM signal and on Windows TerminateProcess() is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will not be terminated.

Warning

If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock.

Note that the start(), join(), isAlive() and getExitCode() methods should only be called by the process that created the process object.

Example

Example usage of some of the methods of Process:

>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
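
A further sketch, in the same doctest style, of the daemon flag and getPid() methods described above:

>>> import processing, time
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> p.setDaemon(True)                  # must be called before start()
>>> p.start()
>>> p.getPid() is not None
True
>>> p.getExitCode() is None            # still running, so no exit code yet
True
>>> p.terminate()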
uqfoundation-multiprocess-b3457a5/py3.9/doc/process-objects.txt000066400000000000000000000136131455552142400246210ustar00rootroot00000000000000.. include:: header.txt ================= Process objects ================= Process objects represent activity that is run in a separate process. Process ======= The `Process` class has equivalents of all the methods of `threading.Thread`: `__init__(group=None, target=None, name=None, args=(), kwargs={})` This constructor should always be called with keyword arguments. Arguments are: `group` should be `None`; exists for compatibility with `threading.Thread`. `target` is the callable object to be invoked by the `run()` method. Defaults to None, meaning nothing is called. `name` is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. `args` is the argument tuple for the target invocation. Defaults to `()`. `kwargs` is a dictionary of keyword arguments for the target invocation. Defaults to `{}`. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (`Process.__init__()`) before doing anything else to the process. `run()` Method representing the process's activity. You may override this method in a subclass. The standard `run()` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the `args` and `kwargs` arguments, respectively. `start()` Start the process's activity. This must be called at most once per process object. It arranges for the object's `run()` method to be invoked in a separate process. `join(timeout=None)` This blocks the calling thread until the process whose `join()` method is called terminates or until the optional timeout occurs. If `timeout` is `None` then there is no timeout. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. `getName()` Return the process's name. `setName(name)` Set the process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. `isAlive()` Return whether the process is alive. Roughly, a process object is alive from the moment the `start()` method returns until the child process terminates. `isDaemon()` Return the process's daemon flag. `setDaemon(daemonic)` Set the process's daemon flag to the Boolean value `daemonic`. This must be called before `start()` is called. The initial value is inherited from the creating process. When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes. In addition process objects also support the following methods. `getPid()` Return the process ID. Before the process is spawned this will be `None`. `getExitCode()` Return the child's exit code. This will be `None` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. `getAuthKey()` Return the process's authentication key (a string). When the `processing` package is initialized the main process is assigned a random hexadecimal string. 
When a `Process` object is created it will inherit the authentication key of its parent process, although this may be changed using `setAuthKey()` below. See `Authentication Keys `_. `setAuthKey(authkey)` Set the process's authentication key which must be a string. `terminate()` Terminate the process. On Unix this is done using the `SIGTERM` signal and on Windows `TerminateProcess()` is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will *not* be terminates. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the `start()`, `join()`, `isAlive()` and `getExitCode()` methods should only be called by the process that created the process object. Example ======= Example usage of some of the methods of `Process`:: >>> import processing, time, signal >>> p = processing.Process(target=time.sleep, args=(1000,)) >>> print p, p.isAlive() False >>> p.start() >>> print p, p.isAlive() True >>> p.terminate() >>> print p, p.isAlive() False >>> p.getExitCode() == -signal.SIGTERM True .. _Prev: processing-ref.html .. _Up: processing-ref.html .. _Next: queue-objects.html uqfoundation-multiprocess-b3457a5/py3.9/doc/processing-ref.html000066400000000000000000000573611455552142400245770ustar00rootroot00000000000000 processing package reference
Prev         Up         Next

processing package reference

The processing package mostly replicates the API of the threading module.

Classes and exceptions

class Process(group=None, target=None, name=None, args=(), kwargs={})

An analogue of threading.Thread.

See Process objects.

exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.
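
A sketch of how BufferTooShort might be handled; it assumes the sendBytes()/recvBytesInto() connection methods described under Connection objects:

from array import array
from processing import Pipe, BufferTooShort

a, b = Pipe()
a.sendBytes('this message is longer than ten bytes')    # assumes sendBytes(), see Connection objects
buf = array('c', ' ' * 10)                               # deliberately too small
try:
    b.recvBytesInto(buf)
except BufferTooShort, e:
    print 'whole message was:', e.args[0]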

Pipes and Queues

When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks.

For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers).

Note that one can also create a shared queue by using a manager object -- see Managers.

For an example of the usage of queues for interprocess communication see ex_workers.py.

Pipe(duplex=True)

Returns a pair (conn1, conn2) of connection objects representing the ends of a pipe.

If duplex is true then the pipe is two way; otherwise conn1 can only be used for receiving messages and conn2 can only be used for sending messages.

See Connection objects.
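
A small sketch of a one-way pipe, following the description above (conn1 receives and conn2 sends when duplex is false):

from processing import Process, Pipe

def sender(conn):
    conn.send('one-way message')
    conn.close()

if __name__ == '__main__':
    receive_end, send_end = Pipe(duplex=False)
    p = Process(target=sender, args=(send_end,))
    p.start()
    print receive_end.recv()            # prints "one-way message"
    p.join()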

Queue(maxsize=0)

Returns a process shared queue object. The usual Empty and Full exceptions from the standard library's Queue module are raised to signal timeouts.

See Queue objects.
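
A minimal sketch of passing a result back from a child process through a queue (the worker function is illustrative):

from processing import Process, Queue

def worker(q):
    q.put('result computed in the child')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    print(q.get())                  # blocks until the child has put an item
    p.join()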

Synchronization primitives

Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the standard library's threading module.

Note that one can also create synchronization primitives by using a manager object -- see Managers.

BoundedSemaphore(value=1)

Returns a bounded semaphore object: a clone of threading.BoundedSemaphore.

(On Mac OSX this is indistinguishable from Semaphore() because sem_getvalue() is not implemented on that platform).

Condition(lock=None)

Returns a condition variable: a clone of threading.Condition.

If lock is specified then it should be a Lock or RLock object from processing.

Event()
Returns an event object: a clone of threading.Event.
Lock()
Returns a non-recursive lock object: a clone of threading.Lock.
RLock()
Returns a recursive lock object: a clone of threading.RLock.
Semaphore(value=1)
Returns a semaphore object: a clone of threading.Semaphore.

Acquiring with a timeout

The acquire() method of BoundedSemaphore, Lock, RLock and Semaphore has a timeout parameter not supported by the equivalents in threading. The signature is acquire(block=True, timeout=None) with keyword parameters being acceptable. If block is true and timeout is not None then it specifies a timeout in seconds. If block is false then timeout is ignored.
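
For instance, the following sketch shows the timeout form giving up instead of blocking indefinitely (a non-recursive Lock acquired a second time by the same process would otherwise block forever):

from processing import Lock

lock = Lock()
lock.acquire()
# the second acquire cannot succeed, so it returns False after 0.1 seconds
print(lock.acquire(block=True, timeout=0.1))
lock.release()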

Interrupting the main thread

If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to BoundedSemaphore.acquire(), Lock.acquire(), RLock.acquire(), Semaphore.acquire(), Condition.acquire() or Condition.wait() then the call will be immediately interrupted and KeyboardInterrupt will be raised.

This differs from the behaviour of threading where SIGINT will be ignored while the equivalent blocking calls are in progress.

Shared Objects

It is possible to create shared objects using shared memory which can be inherited by child processes.

Value(typecode_or_type, *args, **, lock=True)

Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Array(typecode_or_type, size_or_initializer, **, lock=True)

Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library.

See also sharedctypes.
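
A small sketch of a child process modifying inherited shared memory (the modify function is illustrative):

from processing import Process, Value, Array

def modify(num, arr):
    num.value = 3.14
    for i in range(len(arr)):
        arr[i] = -arr[i]

if __name__ == '__main__':
    num = Value('d', 0.0)           # shared double, wrapped with a lock
    arr = Array('i', range(5))      # shared array of ints
    p = Process(target=modify, args=(num, arr))
    p.start()
    p.join()
    print(num.value)                # prints 3.14
    print(arr[:])                   # prints [0, -1, -2, -3, -4]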

Managers

Managers provide a way to create data which can be shared between different processes.

Manager()

Returns a started SyncManager object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies.

The methods for creating shared objects are

list(), dict(), Namespace(), Value(), Array(), Lock(), RLock(), Semaphore(), BoundedSemaphore(), Condition(), Event(), Queue().

See SyncManager.

It is possible to create managers which support other types -- see Customized managers.
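
An illustrative sketch of sharing a dict and a list through a manager (the record function is just an example):

from processing import Process, Manager

def record(d, l):
    d['status'] = 'done'
    l.append(1)

if __name__ == '__main__':
    manager = Manager()
    d = manager.dict()
    l = manager.list()
    p = Process(target=record, args=(d, l))
    p.start()
    p.join()
    print(d)                        # prints {'status': 'done'}
    print(l)                        # prints [1]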

Process Pools

One can create a pool of processes which will carry out tasks submitted to it.

Pool(processes=None, initializer=None, initargs=())

Returns a process pool object which controls a pool of worker processes to which jobs can be submitted.

It supports asynchronous results with timeouts and callbacks and has a parallel map implementation.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

See Pool objects.
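
A minimal sketch of the parallel map (the square function is illustrative):

from processing import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=4)        # omit processes to use cpuCount()
    print(pool.map(square, range(10)))
    pool.close()
    pool.join()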

Logging

Some support for logging is available. Note, however, that the logging package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up.

enableLogging(level, HandlerType=None, handlerArgs=(), format=None)

Enables logging and sets the debug level used by the package's logger to level. See documentation for the logging module in the standard library.

If HandlerType is specified then a handler is created using HandlerType(*handlerArgs) and this will be used by the logger -- any previous handlers will be discarded. If format is specified then this will be used for the handler; otherwise format defaults to '[%(levelname)s/%(processName)s] %(message)s'. (The logger used by processing allows use of the non-standard '%(processName)s' format.)

If HandlerType is not specified and the logger has no handlers then a default one is created which prints to sys.stderr.

Note: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call enableLogging() with the same arguments which were used when its parent process last called enableLogging() (if it ever did).

getLogger()
Returns the logger used by processing. If enableLogging() has not yet been called then None is returned.

Below is an example session with logging turned on:

>>> import processing, logging
>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] manager exiting with exitcode 0

Miscellaneous

activeChildren()

Return a list of all live children of the current process.

Calling this has the side effect of "joining" any processes which have already finished.

cpuCount()
Returns the number of CPUs in the system. May raise NotImplementedError.
currentProcess()

An analogue of threading.current_thread().

Returns the object corresponding to the current process.
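
For instance, a short illustrative session using these helpers (output will vary by machine):

from processing import currentProcess, activeChildren, cpuCount

print(currentProcess().getName())   # e.g. 'MainProcess'
print(cpuCount())                   # number of CPUs; may raise NotImplementedError
print(activeChildren())             # [] when no child processes are running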

freezeSupport()

Adds support for when a program which uses the processing package has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example

from processing import Process, freezeSupport

def f():
    print 'hello world!'

if __name__ == '__main__':
    freezeSupport()
    Process(target=f).start()

If the freezeSupport() line is missed out then trying to run the frozen executable will raise RuntimeError.

If the module is being run normally by the python interpreter then freezeSupport() has no effect.

Note

  • The processing.dummy package replicates the API of processing but is no more than a wrapper around the threading module.
  • processing contains no analogues of activeCount, enumerate, settrace, setprofile, Timer, or local from the threading module.
uqfoundation-multiprocess-b3457a5/py3.9/doc/processing-ref.txt000066400000000000000000000310141455552142400244350ustar00rootroot00000000000000.. include:: header.txt ============================== processing package reference ============================== The `processing` package mostly replicates the API of the `threading` module. Classes and exceptions ---------------------- **class** `Process(group=None, target=None, name=None, args=(), kwargs={})` An analogue of `threading.Thread`. See `Process objects`_. **exception** `BufferTooShort` Exception raised by the `recvBytesInto()` method of a `connection object `_ when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Pipes and Queues ---------------- When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers). Note that one can also create a shared queue by using a manager object -- see `Managers`_. For an example of the usage of queues for interprocess communication see `ex_workers.py <../examples/ex_workers.py>`_. `Pipe(duplex=True)` Returns a pair `(conn1, conn2)` of connection objects representing the ends of a pipe. If `duplex` is true then the pipe is two way; otherwise `conn1` can only be used for receiving messages and `conn2` can only be used for sending messages. See `Connection objects `_. `Queue(maxsize=0)` Returns a process shared queue object. The usual `Empty` and `Full` exceptions from the standard library's `Queue` module are raised to signal timeouts. See `Queue objects `_. Synchronization primitives -------------------------- Generally synchronization primitives are not as necessary in a multiprocess program as they are in a mulithreaded program. See the documentation for the standard library's `threading` module. Note that one can also create synchronization primitves by using a manager object -- see `Managers`_. `BoundedSemaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.BoundedSemaphore`. (On Mac OSX this is indistiguishable from `Semaphore()` because `sem_getvalue()` is not implemented on that platform). `Condition(lock=None)` Returns a condition variable: a clone of `threading.Condition`. If `lock` is specified then it should be a `Lock` or `RLock` object from `processing`. `Event()` Returns an event object: a clone of `threading.Event`. `Lock()` Returns a non-recursive lock object: a clone of `threading.Lock`. `RLock()` Returns a recursive lock object: a clone of `threading.RLock`. `Semaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.Semaphore`. .. admonition:: Acquiring with a timeout The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and `Semaphore` has a timeout parameter not supported by the equivalents in `threading`. The signature is `acquire(block=True, timeout=None)` with keyword parameters being acceptable. If `block` is true and `timeout` is not `None` then it specifies a timeout in seconds. If `block` is false then `timeout` is ignored. .. 
admonition:: Interrupting the main thread If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to `BoundedSemaphore.acquire()`, `Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()`, `Condition.acquire()` or `Condition.wait()` then the call will be immediately interrupted and `KeyboardInterrupt` will be raised. This differs from the behaviour of `threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared Objects -------------- It is possible to create shared objects using shared memory which can be inherited by child processes. `Value(typecode_or_type, *args, **, lock=True)` Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Array(typecode_or_type, size_or_initializer, **, lock=True)` Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library. See also `sharedctypes `_. Managers -------- Managers provide a way to create data which can be shared between different processes. `Manager()` Returns a started `SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. The methods for creating shared objects are `list()`, `dict()`, `Namespace()`, `Value()`, `Array()`, `Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`. See `SyncManager `_. It is possible to create managers which support other types -- see `Customized managers `_. Process Pools ------------- One can create a pool of processes which will carry out tasks submitted to it. 
`Pool(processes=None, initializer=None, initargs=())` Returns a process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. See `Pool objects `_. Logging ------- Some support for logging is available. Note, however, that the `logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. `enableLogging(level, HandlerType=None, handlerArgs=(), format=None)` Enables logging and sets the debug level used by the package's logger to `level`. See documentation for the `logging` module in the standard library. If `HandlerType` is specified then a handler is created using `HandlerType(*handlerArgs)` and this will be used by the logger -- any previous handlers will be discarded. If `format` is specified then this will be used for the handler; otherwise `format` defaults to `'[%(levelname)s/%(processName)s] %(message)s'`. (The logger used by `processing` allows use of the non-standard `'%(processName)s'` format.) If `HandlerType` is not specified and the logger has no handlers then a default one is created which prints to `sys.stderr`. *Note*: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call `enableLogging()` with the same arguments which were used when its parent process last called `enableLogging()` (if it ever did). `getLogger()` Returns the logger used by `processing`. If `enableLogging()` has not yet been called then `None` is returned. Below is an example session with logging turned on:: >>> import processing, logging >>> processing.enableLogging(level=logging.INFO) >>> processing.getLogger().warning('doomed') [WARNING/MainProcess] doomed >>> m = processing.Manager() [INFO/SyncManager-1] child process calling self.run() [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-1] manager received shutdown message [INFO/SyncManager-1] manager exiting with exitcode 0 Miscellaneous ------------- `activeChildren()` Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. `cpuCount()` Returns the number of CPUs in the system. May raise `NotImplementedError`. `currentProcess()` An analogue of `threading.current_thread()`. Returns the object corresponding to the current process. `freezeSupport()` Adds support for when a program which uses the `processing` package has been frozen to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) One needs to call this function straight after the `if __name__ == '__main__'` line of the main module. For example :: from processing import Process, freezeSupport def f(): print 'hello world!' if __name__ == '__main__': freezeSupport() Process(target=f).start() If the `freezeSupport()` line is missed out then trying to run the frozen executable will raise `RuntimeError`. If the module is being run normally by the python interpreter then `freezeSupport()` has no effect. .. 
note:: * The `processing.dummy` package replicates the API of `processing` but is no more than a wrapper around the `threading` module. * `processing` contains no analogues of `activeCount`, `enumerate`, `settrace`, `setprofile`, `Timer`, or `local` from the `threading` module. Subsections ----------- + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes object `_ + `Listeners and Clients `_ .. _Prev: intro.html .. _Up: index.html .. _Next: process-objects.html uqfoundation-multiprocess-b3457a5/py3.9/doc/programming-guidelines.html000066400000000000000000000214551455552142400263140ustar00rootroot00000000000000 Programming guidelines
Prev         Up         Next

Programming guidelines

There are certain guidelines and idioms which should be adhered to when using the processing package.

All platforms

Avoid shared state

As far as possible one should try to avoid shifting large amounts of data between processes.

It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the threading module.

Picklability:
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies:

Do not use a proxy object from more than one thread unless you protect it with a lock.

(There is never a problem with different processes using the 'same' proxy.)

Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or activeChildren() is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's isAlive() will join the process. Even so it is probably good practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many of the types from the processing package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes

Using the terminate() method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes.

Therefore it is probably best to only consider using terminate() on processes which never use any shared resources.

Joining processes that use queues

Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the cancelJoin() method of the queue to avoid this behaviour.)

This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will automatically be joined.

An example which will deadlock is the following:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # this deadlocks
    obj = queue.get()

A fix here would be to swap the last two lines round (or simply remove the p.join() line).

Explicitly pass resources to child processes

On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process.

Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process.

So for instance

from processing import Process, Lock

def f():
    ... do something using "lock" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f).start()

should be rewritten as

from processing import Process, Lock

def f(l):
    ... do something using "l" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f, args=(lock,)).start()

Windows

Since Windows lacks os.fork() it has a few extra restrictions:

More picklability:

Ensure that all arguments to Process.__init__() are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the target argument on Windows --- just define a function and use that instead.

Also, if you subclass Process then make sure that instances will be picklable when the start() method is called.

Global variables:

Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that start() was called.

However, global variables which are just module level constants cause no problems.

Safe importing of main module:

Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).

For example, under Windows running the following module would fail with a RuntimeError:

from processing import Process

def foo():
    print 'hello'

p = Process(target=foo)
p.start()

Instead one should protect the "entry point" of the program by using if __name__ == '__main__': as follows:

from processing import Process

def foo():
    print 'hello'

if __name__ == '__main__':
    freezeSupport()
    p = Process(target=foo)
    p.start()

(The freezeSupport() line can be omitted if the program will be run normally instead of frozen.)

This allows the newly spawned Python interpreter to safely import the module and then run the module's foo() function.

Similar restrictions apply if a pool or manager is created in the main module.

uqfoundation-multiprocess-b3457a5/py3.9/doc/programming-guidelines.txt000066400000000000000000000150221455552142400261600ustar00rootroot00000000000000.. include:: header.txt ======================== Programming guidelines ======================== There are certain guidelines and idioms which should be adhered to when using the `processing` package. All platforms ------------- *Avoid shared state* As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the `threading` module. *Picklability*: Ensure that the arguments to the methods of proxies are picklable. *Thread safety of proxies*: Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the 'same' proxy.) *Joining zombie processes* On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or `activeChildren()` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's `isAlive()` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. *Better to inherit than pickle/unpickle* On Windows many of types from the `processing` package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which need access to a shared resource created elsewhere can inherit it from an ancestor process. *Avoid terminating processes* Using the `terminate()` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using `terminate()` on processes which never use any shared resources. *Joining processes that use queues* Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the `cancelJoin()` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from processing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the `p.join()` line). *Explicity pass resources to child processes* On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. 
Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from processing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from processing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Windows ------- Since Windows lacks `os.fork()` it has a few extra restrictions: *More picklability*: Ensure that all arguments to `Process.__init__()` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the `target` argument on Windows --- just define a function and use that instead. Also, if you subclass `Process` then make sure that instances will be picklable when the `start()` method is called. *Global variables*: Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that `start()` was called. However, global variables which are just module level constants cause no problems. *Safe importing of main module*: Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a `RuntimeError`:: from processing import Process def foo(): print 'hello' p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using `if __name__ == '__main__':` as follows:: from processing import Process def foo(): print 'hello' if __name__ == '__main__': freezeSupport() p = Process(target=foo) p.start() (The `freezeSupport()` line can be ommitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's `foo()` function. Similar restrictions apply if a pool or manager is created in the main module. .. _Prev: connection-ref.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.9/doc/proxy-objects.html000066400000000000000000000175771455552142400244660ustar00rootroot00000000000000 Proxy objects
Prev         Up         Next

Proxy objects

A proxy is an object which refers to a shared object which lives (presumably) in a different process. The shared object is said to be the referent of the proxy. Multiple proxy objects may have the same referent.

A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can:

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<Proxy[list] object at 0x00DFA230>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
>>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
True

Notice that applying str() to a proxy will return the representation of the referent, whereas applying repr() will return the representation of the proxy.

An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:

>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b)         # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']

Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the for statement:

>>> a = manager.dict([(i*i, i) for i in range(10)])
>>> for key in a:
...     print '<%r,%r>' % (key, a[key]),
...
<0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6>

Note

Although list and dict proxy objects are iterable, it will be much more efficient to iterate over a copy of the referent, for example

for item in some_list[:]:
    ...

and

for key in some_dict.keys():
    ...

Methods of BaseProxy

Proxy objects are instances of subclasses of BaseProxy. The only semi-public methods of BaseProxy are the following:

_callMethod(methodname, args=(), kwds={})

Call and return the result of a method of the proxy's referent.

If proxy is a proxy whose referent is obj then the expression

proxy._callMethod(methodname, args, kwds)

will evaluate the expression

getattr(obj, methodname)(*args, **kwds)         (*)

in the manager's process.

The returned value will be either a copy of the result of (*) or if the result is an unpicklable iterator then a proxy for the iterator.

If an exception is raised by (*) then it is re-raised by _callMethod(). If some other exception is raised in the manager's process then this is converted into a RemoteError exception and is raised by _callMethod().

Note in particular that an exception will be raised if methodname has not been exposed --- see the exposed argument to CreatorMethod.

_getValue()

Return a copy of the referent.

If the referent is unpicklable then this will raise an exception.

__repr__
Return a representation of the proxy object.
__str__
Return the representation of the referent.

Cleanup

A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent.

A shared object gets deleted from the manager process when there are no longer any proxies referring to it.

Examples

An example of the usage of _callMethod():

>>> l = manager.list(range(10))
>>> l._callMethod('__getslice__', (2, 7))   # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._callMethod('__iter__')               # equiv to `iter(l)`
<Proxy[iter] object at 0x00DFAFF0>
>>> l._callMethod('__getitem__', (20,))     # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range

As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:

class IteratorProxy(BaseProxy):
    def __iter__(self):
        return self
    def next(self):
        return self._callMethod('next')
uqfoundation-multiprocess-b3457a5/py3.9/doc/proxy-objects.txt000066400000000000000000000115571455552142400243310ustar00rootroot00000000000000.. include:: header.txt =============== Proxy objects =============== A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that the its referent can:: >>> from processing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print l [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print repr(l) >>> l[4] 16 >>> l[2:5] [4, 9, 16] >>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] True Notice that applying `str()` to a proxy will return the representation of the referent, whereas applying `repr()` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of `a` now contains referent of `b` >>> print a, b [[]] [] >>> b.append('hello') >>> print a, b [['hello']] ['hello'] Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the `for` statement:: >>> a = manager.dict([(i*i, i) for i in range(10)]) >>> for key in a: ... print '<%r,%r>' % (key, a[key]), ... <0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6> .. note:: Although `list` and `dict` proxy objects are iterable, it will be much more efficient to iterate over a *copy* of the referent, for example :: for item in some_list[:]: ... and :: for key in some_dict.keys(): ... Methods of `BaseProxy` ====================== Proxy objects are instances of subclasses of `BaseProxy`. The only semi-public methods of `BaseProxy` are the following: `_callMethod(methodname, args=(), kwds={})` Call and return the result of a method of the proxy's referent. If `proxy` is a proxy whose referent is `obj` then the expression `proxy._callMethod(methodname, args, kwds)` will evaluate the expression `getattr(obj, methodname)(*args, **kwds)` |spaces| _`(*)` in the manager's process. The returned value will be either a copy of the result of `(*)`_ or if the result is an unpicklable iterator then a proxy for the iterator. If an exception is raised by `(*)`_ then then is re-raised by `_callMethod()`. If some other exception is raised in the manager's process then this is converted into a `RemoteError` exception and is raised by `_callMethod()`. Note in particular that an exception will be raised if `methodname` has not been *exposed* --- see the `exposed` argument to `CreatorMethod `_. `_getValue()` Return a copy of the referent. If the referent is unpicklable then this will raise an exception. `__repr__` Return a representation of the proxy object. `__str__` Return the representation of the referent. Cleanup ======= A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. 
A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Examples ======== An example of the usage of `_callMethod()`:: >>> l = manager.list(range(10)) >>> l._callMethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callMethod('__iter__') # equiv to `iter(l)` >>> l._callMethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:: class IteratorProxy(BaseProxy): def __iter__(self): return self def next(self): return self._callMethod('next') .. _Prev: manager-objects.html .. _Up: processing-ref.html .. _Next: pool-objects.html uqfoundation-multiprocess-b3457a5/py3.9/doc/queue-objects.html000066400000000000000000000227101455552142400244120ustar00rootroot00000000000000 Queue objects
Prev         Up         Next

Queue objects

The queue type provided by processing is a multi-producer, multi-consumer FIFO queue modelled on the Queue.Queue class in the standard library.

Queue(maxsize=0)

Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe.

processing.Queue implements all the methods of Queue.Queue except for qsize(), task_done() and join().

empty()
Return True if the queue is empty, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
full()
Return True if the queue is full, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
put(item, block=True, timeout=None)
Put item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).
put_nowait(item), putNoWait(item)
Equivalent to put(item, False).
get(block=True, timeout=None)
Remove and return an item from the queue. If optional args block is true and timeout is None (the default), block if necessary until an item is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the Empty exception (timeout is ignored in that case).
get_nowait(), getNoWait()
Equivalent to get(False).

processing.Queue has a few additional methods not found in Queue.Queue which are usually unnecessary:

putMany(iterable)
If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So q.putMany(X) is a faster alternative to for x in X: q.put(x). Raises an error if the queue has finite size.
close()
Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected.
joinThread()

This joins the background thread and can only be used after close() has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe.

By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call cancelJoin() to prevent this behaviour.

cancelJoin()
Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue.

Empty and Full

processing uses the usual Queue.Empty and Queue.Full exceptions to signal a timeout. They are not available in the processing namespace so you need to import them from Queue.
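
An illustrative sketch of handling these timeouts (this assumes the Python 2-era standard library, where the exceptions live in the Queue module as described above):

from processing import Queue
from Queue import Empty, Full

q = Queue(maxsize=1)
q.put('x')
try:
    q.put('y', block=True, timeout=0.1)     # the queue is already full
except Full:
    print('put timed out')
print(q.get())
try:
    q.get(block=True, timeout=0.1)          # the queue is now empty
except Empty:
    print('get timed out')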

Warning

If a process is killed using the terminate() method or os.kill() while it is trying to use a Queue then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when they try to use the queue later on.

Warning

As mentioned above, if a child process has put items on a queue (and it has not used cancelJoin()) then that process will not terminate until all buffered items have been flushed to the pipe.

This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children.

Note that a queue created using a manager does not have this issue. See Programming Guidelines.

uqfoundation-multiprocess-b3457a5/py3.9/doc/queue-objects.txt000066400000000000000000000121211455552142400242600ustar00rootroot00000000000000.. include:: header.txt =============== Queue objects =============== The queue type provided by `processing` is a multi-producer, multi-consumer FIFO queue modelled on the `Queue.Queue` class in the standard library. `Queue(maxsize=0)` Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. `Queue.Queue` implements all the methods of `Queue.Queue` except for `qsize()`, `task_done()` and `join()`. `empty()` Return `True` if the queue is empty, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `full()` Return `True` if the queue is full, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `put(item, block=True, timeout=None)` Put item into the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Full` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the `Full` exception (`timeout` is ignored in that case). `put_nowait(item)`, `putNoWait(item)` Equivalent to `put(item, False)`. `get(block=True, timeout=None)` Remove and return an item from the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Empty` exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the `Empty` exception (`timeout` is ignored in that case). `get_nowait()`, `getNoWait()` Equivalent to `get(False)`. `processing.Queue` has a few additional methods not found in `Queue.Queue` which are usually unnecessary: `putMany(iterable)` If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So `q.putMany(X)` is a faster alternative to `for x in X: q.put(x)`. Raises an error if the queue has finite size. `close()` Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. `joinThread()` This joins the background thread and can only be used after `close()` has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call `cancelJoin()` to prevent this behaviour. `cancelJoin()` Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue. .. admonition:: `Empty` and `Full` `processing` uses the usual `Queue.Empty` and `Queue.Full` exceptions to signal a timeout. They are not available in the `processing` namespace so you need to import them from `Queue`. .. 
warning:: If a process is killed using the `terminate()` method or `os.kill()` while it is trying to use a `Queue` then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used `cancelJoin()`) then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all it non-daemonic children. Note that a queue created using a manager does not have this issue. See `Programming Guidelines `_. .. _Prev: process-objects.html .. _Up: processing-ref.html .. _Next: connection-objects.html uqfoundation-multiprocess-b3457a5/py3.9/doc/sharedctypes.html000066400000000000000000000241571455552142400243440ustar00rootroot00000000000000 Shared ctypes objects
Prev         Up         Next

Shared ctypes objects

The processing.sharedctypes module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the ctypes package.)

The functions in the module are

RawArray(typecode_or_type, size_or_initializer)

Returns a ctypes array allocated from shared memory.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock.

RawValue(typecode_or_type, *args)

Returns a ctypes object allocated from shared memory.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see documentation for ctypes.

Array(typecode_or_type, size_or_initializer, **, lock=True)

The same as RawArray() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Value(typecode_or_type, *args, **, lock=True)

The same as RawValue() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes object.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

copy(obj)
Returns a ctypes object allocated from shared memory which is a copy of the ctypes object obj.
synchronized(obj, lock=None)

Returns a process-safe wrapper object for a ctypes object which uses lock to synchronize access. If lock is None then a processing.RLock object is created automatically.

A synchronized wrapper will have two methods in addition to those of the object it wraps: getobj() returns the wrapped object and getlock() returns the lock object used for synchronization.

Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object.

Equivalences

The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table MyStruct is some subclass of ctypes.Structure.)

ctypes sharedctypes using type sharedctypes using typecode
c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4)
MyStruct(4, 6) RawValue(MyStruct, 4, 6)  
(c_short * 7)() RawArray(c_short, 7) RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))

Example

Below is an example where a number of ctypes objects are modified by a child process

from processing import Process, Lock
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', 'hello world', lock=lock)
    A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print n.value
    print x.value
    print s.value
    print [(p.x, p.y) for p in A]

The results printed are

49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]

Avoid sharing pointers

Although it is possible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash.

uqfoundation-multiprocess-b3457a5/py3.9/doc/sharedctypes.txt000066400000000000000000000143071455552142400242130ustar00rootroot00000000000000.. include:: header.txt ======================== Shared ctypes objects ======================== The `processing.sharedctypes` module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the `ctypes` package.) The functions in the module are `RawArray(typecode_or_type, size_or_initializer)` Returns a ctypes array allocated from shared memory. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock. `RawValue(typecode_or_type, *args)` Returns a ctypes object allocated from shared memory. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see documentation for `ctypes`. `Array(typecode_or_type, size_or_initializer, **, lock=True)` The same as `RawArray()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Value(typecode_or_type, *args, **, lock=True)` The same as `RawValue()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `copy(obj)` Returns a ctypes object allocated from shared memory which is a copy of the ctypes object `obj`. `synchronized(obj, lock=None)` Returns a process-safe wrapper object for a ctypes object which uses `lock` to synchronize access. If `lock` is `None` then a `processing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: `getobj()` returns the wrapped object and `getlock()` returns the lock object used for synchronization. 
Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. Equivalences ============ The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table `MyStruct` is some subclass of `ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Example ======= Below is an example where a number of ctypes objects are modified by a child process :: from processing import Process, Lock from processing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for p in A: p.x **= 2 p.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(ctypes.c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print n.value print x.value print s.value print [(p.x, p.y) for p in A] The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. admonition:: Avoid sharing pointers Although it is posible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. _Prev: pool-objects.html .. _Up: processing-ref.html .. _Next: connection-ref.html uqfoundation-multiprocess-b3457a5/py3.9/doc/tests.html000066400000000000000000000060761455552142400230100ustar00rootroot00000000000000 Tests and Examples
Prev         Up         Next

Tests and Examples

processing contains a test sub-package which contains unit tests for the package. You can do a test run by doing

python -m processing.tests

on Python 2.5 or

python -c "from processing.tests import main; main()"

on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager.

The example sub-package contains the following modules:

ex_newtype.py
Demonstration of how to create and use customized managers and proxies.
ex_pool.py
Test of the Pool class which represents a process pool.
ex_synchronize.py
Test of synchronization types like locks, conditions and queues.
ex_workers.py
A test showing how to use queues to feed tasks to a collection of worker processes and collect the results.
ex_webserver.py
An example of how a pool of worker processes can each run a SimpleHTTPServer.HttpServer instance while sharing a single listening socket.
benchmarks.py
Some simple benchmarks comparing processing with threading.
uqfoundation-multiprocess-b3457a5/py3.9/doc/tests.txt000066400000000000000000000027331455552142400226570ustar00rootroot00000000000000.. include:: header.txt Tests and Examples ================== `processing` contains a `test` sub-package which contains unit tests for the package. You can do a test run by doing :: python -m processing.tests on Python 2.5 or :: python -c "from processing.tests import main; main()" on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager. The `example` sub-package contains the following modules: `ex_newtype.py <../examples/ex_newtype.py>`_ Demonstration of how to create and use customized managers and proxies. `ex_pool.py <../examples/ex_pool.py>`_ Test of the `Pool` class which represents a process pool. `ex_synchronize.py <../examples/ex_synchronize.py>`_ Test of synchronization types like locks, conditions and queues. `ex_workers.py <../examples/ex_workers.py>`_ A test showing how to use queues to feed tasks to a collection of worker process and collect the results. `ex_webserver.py <../examples/ex_webserver.py>`_ An example of how a pool of worker processes can each run a `SimpleHTTPServer.HttpServer` instance while sharing a single listening socket. `benchmarks.py <../examples/benchmarks.py>`_ Some simple benchmarks comparing `processing` with `threading`. .. _Prev: programming-guidelines.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/py3.9/doc/version.txt000066400000000000000000000000341455552142400231720ustar00rootroot00000000000000.. |version| replace:: 0.52 uqfoundation-multiprocess-b3457a5/py3.9/examples/000077500000000000000000000000001455552142400220205ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/examples/__init__.py000066400000000000000000000000001455552142400241170ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/examples/benchmarks.py000066400000000000000000000131321455552142400245070ustar00rootroot00000000000000# # Simple benchmarks for the processing package # import time, sys, multiprocess as processing, threading, queue as Queue, gc processing.freezeSupport = processing.freeze_support if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in range(iterations): q.put(a) # q.putMany((a for i in range(iterations)) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print(iterations, 'objects passed through the queue in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 cond.acquire() cond.notify() cond.release() for i in range(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = processing.Pipe() cond = processing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = processing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print(iterations, 'objects passed through connection in',elapsed,'seconds') print('average number/sec:', 
iterations/elapsed) #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): a = seq[5] elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): l.acquire() l.release() elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in range(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in range(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print(iterations * 2, 'waits in', elapsed, 'seconds') print('average number/sec:', iterations * 2 / elapsed) #### def test(): manager = processing.Manager() gc.disable() print('\n\t######## testing Queue.Queue\n') test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print('\n\t######## testing processing.Queue\n') test_queuespeed(processing.Process, processing.Queue(), processing.Condition()) print('\n\t######## testing Queue managed by server process\n') test_queuespeed(processing.Process, manager.Queue(), manager.Condition()) print('\n\t######## testing processing.Pipe\n') test_pipespeed() print print('\n\t######## testing list\n') test_seqspeed(range(10)) print('\n\t######## testing list managed by server process\n') test_seqspeed(manager.list(range(10))) print('\n\t######## testing Array("i", ..., lock=False)\n') test_seqspeed(processing.Array('i', range(10), lock=False)) print('\n\t######## testing Array("i", ..., lock=True)\n') test_seqspeed(processing.Array('i', range(10), lock=True)) print() print('\n\t######## testing threading.Lock\n') test_lockspeed(threading.Lock()) print('\n\t######## testing threading.RLock\n') test_lockspeed(threading.RLock()) print('\n\t######## testing processing.Lock\n') test_lockspeed(processing.Lock()) print('\n\t######## testing processing.RLock\n') test_lockspeed(processing.RLock()) print('\n\t######## testing lock managed by server process\n') test_lockspeed(manager.Lock()) print('\n\t######## testing rlock managed by server process\n') test_lockspeed(manager.RLock()) print() print('\n\t######## testing threading.Condition\n') test_conditionspeed(threading.Thread, threading.Condition()) print('\n\t######## testing processing.Condition\n') test_conditionspeed(processing.Process, processing.Condition()) print('\n\t######## testing condition managed by a server process\n') test_conditionspeed(processing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': processing.freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.9/examples/ex_newtype.py000066400000000000000000000030731455552142400245640ustar00rootroot00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. 
# from multiprocess import freeze_support as freezeSupport from multiprocess.managers import BaseManager, IteratorProxy as BaseProxy ## class Foo(object): def f(self): print('you called Foo.f()') def g(self): print('you called Foo.g()') def _h(self): print('you called Foo._h()') # A simple generator function def baz(): for i in range(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): def __iter__(self): return self def __next__(self): return self._callmethod('__next__') ## class MyManager(BaseManager): pass # register the Foo class; make all public methods accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make only `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) ## def test(): manager = MyManager() manager.start() print('-' * 20) f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') print('-' * 20) f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') print('-' * 20) it = manager.baz() for i in it: print('<%d>' % i, end=' ') print() ## if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.9/examples/ex_pool.py000066400000000000000000000155061455552142400240460ustar00rootroot00000000000000# # A test of `processing.Pool` class # from multiprocess import Pool, TimeoutError from multiprocess import cpu_count as cpuCount, current_process as currentProcess, freeze_support as freezeSupport, active_children as activeChildren import time, random, sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print('cpuCount() = %d\n' % cpuCount()) # # Create pool # PROCESSES = 4 print('Creating pool with %d processes\n' % PROCESSES) pool = Pool(PROCESSES) # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print('Ordered results using pool.apply_async():') for r in results: print('\t', r.get()) print() print('Ordered results using pool.imap():') for x in imap_it: print('\t', x) print() print('Unordered results using pool.imap_unordered():') for x in imap_unordered_it: print('\t', x) print() print('Ordered results using pool.map() --- will block till complete:') for x in pool.map(calculatestar, TASKS): print('\t', x) print() # # Simple benchmarks # N = 100000 print('def pow3(x): return x**3') t = time.time() A = list(map(pow3, range(N))) print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() B = pool.map(pow3, range(N)) print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() C = list(pool.imap(pow3, range(N), chunksize=N//8)) print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() L = [None] * 1000000 print('def noop(x): pass') print('L = [None] * 1000000') t = 
time.time() A = list(map(noop, L)) print('\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() B = pool.map(noop, L) print('\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() del A, B, C, L # # Test error handling # print('Testing error handling:') try: print(pool.apply(f, (5,))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.apply()') else: raise AssertionError('expected ZeroDivisionError') try: print(pool.map(f, range(10))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.map()') else: raise AssertionError('expected ZeroDivisionError') try: print(list(pool.imap(f, range(10)))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from list(pool.imap())') else: raise AssertionError('expected ZeroDivisionError') it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError('expected ZeroDivisionError') assert i == 9 print('\tGot ZeroDivisionError as expected from IMapIterator.next()') print() # # Testing timeouts # print('Testing ApplyResult.get() with timeout:', end='') res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except TimeoutError: sys.stdout.write('.') print() print() print('Testing IMapIterator.next() with timeout:', end='') it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except TimeoutError: sys.stdout.write('.') print() print() # # Testing callback # print('Testing callback:') A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print('\tcallbacks succeeded\n') else: print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print('Testing close():') for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print('\tclose() succeeded\n') # # Check terminate() method # print('Testing terminate():') pool = Pool(2) ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print('\tterminate() succeeded\n') # # Check garbage collection # print('Testing garbage collection:') pool = Pool(2) processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] del results, pool time.sleep(0.2) for worker in processes: assert not worker.is_alive() print('\tgarbage collection succeeded\n') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.9/examples/ex_synchronize.py000066400000000000000000000144041455552142400254440ustar00rootroot00000000000000# # A test file for the `processing` package # import time, sys, random from queue import Empty import multiprocess as processing # may get 
overwritten processing.currentProcess = processing.current_process processing.freezeSupport = processing.freeze_support processing.activeChildren = processing.active_children #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print('\n\t\t\t' + str(processing.currentProcess()) + ' has finished') running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = processing.Value('i', TASKS) mutex = processing.Lock() for i in range(TASKS): processing.Process(target=value_func, args=(running, mutex)).start() while running.value > 0: time.sleep(0.08) mutex.acquire() print(running.value, end=' ') sys.stdout.flush() mutex.release() print() print('No more running processes') #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = processing.Queue() p = processing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print(o, end=' ') sys.stdout.flush() except Empty: print('TIMEOUT') print() #### TEST_CONDITION def condition_func(cond): cond.acquire() print('\t' + str(cond)) time.sleep(2) print('\tchild is notifying') print('\t' + str(cond)) cond.notify() cond.release() def test_condition(): cond = processing.Condition() p = processing.Process(target=condition_func, args=(cond,)) print(cond) cond.acquire() print(cond) cond.acquire() print(cond) p.start() print('main is waiting') cond.wait() print('main has woken up') print(cond) cond.release() print(cond) cond.release() p.join() print(cond) #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print(running.value, 'tasks are running') mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print('%s has finished' % processing.currentProcess()) mutex.release() sema.release() def test_semaphore(): sema = processing.Semaphore(3) mutex = processing.RLock() running = processing.Value('i', 0) processes = [ processing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print('\tchild sleeping') time.sleep(5.5) print('\n\tchild terminating') def test_join_timeout(): p = processing.Process(target=join_timeout_func) p.start() print('waiting for process to finish') while 1: p.join(timeout=1) if not p.is_alive(): break print('.', end=' ') sys.stdout.flush() #### TEST_EVENT def event_func(event): print('\t%r is waiting' % processing.currentProcess()) event.wait() print('\t%r has woken up' % processing.currentProcess()) def test_event(): event = processing.Event() processes = [processing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print('main is sleeping') time.sleep(2) print('main is setting event') event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert list(a) == sa print('Tests passed') def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [processing.Value(id, v) for id, v in values] shared_arrays = 
[processing.Array(id, a) for id, a in arrays] p = processing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print('\n\t######## %s\n' % func.__name__) func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print(info) raise ValueError('there should be no positive refcounts left') if __name__ == '__main__': processing.freezeSupport() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print(' Using processes '.center(79, '-')) namespace = processing elif sys.argv[1] == 'manager': print(' Using processes and a manager '.center(79, '-')) namespace = processing.Manager() namespace.Process = processing.Process namespace.currentProcess = processing.currentProcess namespace.activeChildren = processing.activeChildren elif sys.argv[1] == 'threads': print(' Using threads '.center(79, '-')) import processing.dummy as namespace else: print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]) raise SystemExit(2) test(namespace) uqfoundation-multiprocess-b3457a5/py3.9/examples/ex_webserver.py000066400000000000000000000041001455552142400250650ustar00rootroot00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `processing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. 
# import os import sys from multiprocess import Process, current_process as currentProcess, freeze_support as freezeSupport from http.server import HTTPServer from http.server import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocess.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (currentProcess()._name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print('Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)) print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']) os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.9/examples/ex_workers.py000066400000000000000000000042241455552142400245640ustar00rootroot00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). 
# import time import random from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport from multiprocess import Queue # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks list(map(task_queue.put, TASKS1)) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print('Unordered results:') for i in range(len(TASKS1)): print('\t', done_queue.get()) # Add more tasks using `put()` instead of `putMany()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print('\t', done_queue.get()) # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/py3.9/index.html000066400000000000000000000117511455552142400222040ustar00rootroot00000000000000 Python processing

Python processing

Author: R Oudkerk
Contact: roudkerk at users.berlios.de
Url: http://developer.berlios.de/projects/pyprocessing
Version: 0.52
Licence: BSD Licence

processing is a package for the Python language which supports the spawning of processes using the API of the standard library's threading module. It runs on both Unix and Windows.

Features:

  • Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
  • Objects can be shared between processes using a server process or (for simple data) shared memory.
  • Equivalents of all the synchronization primitives in threading are available.
  • A Pool class makes it easy to submit tasks to a pool of worker processes.

Examples

The processing.Process class follows the API of threading.Thread. For example

from processing import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=[q])
    p.start()
    print q.get()
    p.join()

Synchronization primitives like locks, semaphores and conditions are available, for example

>>> from processing import Condition
>>> c = Condition()
>>> print c
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print c
<Condition(<RLock(MainProcess, 1)>), 0>

One can also use a manager to create shared objects either in shared memory or in a server process, for example

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print l
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print repr(l)
<Proxy[list] object at 0x00E1B3B0>
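
The manager example carries over to the multiprocess fork as well; a minimal sketch, differing only in the print syntax:

>>> from multiprocess import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print(l)
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
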

Tasks can be offloaded to a pool of worker processes in various ways, for example

>>> from processing import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.mapAsync(f, range(10))
>>> print result.get(timeout=1)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
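
In the multiprocess fork shipped here the camelCase mapAsync is spelled map_async; a minimal sketch of the same task (multiprocess relies on dill for serialization, which broadens what can be shipped to the worker processes):

>>> from multiprocess import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.map_async(f, range(10))
>>> print(result.get(timeout=1))
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
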
BerliOS Developer Logo
uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/000077500000000000000000000000001455552142400227335ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/__init__.py000066400000000000000000000035001455552142400250420ustar00rootroot00000000000000# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Original: Copyright (c) 2006-2008, R Oudkerk # Original: Licensed to PSF under a Contributor Agreement. # Forked by Mike McKerns, to support enhanced serialization. # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) sys.path.append(root) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(root, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) del os, sys, root, get_license_text, get_readme_as_rst import sys from . import context # # Copy stuff from default context # __all__ = [x for x in dir(context._default_context) if not x.startswith('_')] globals().update((name, getattr(context._default_context, name)) for name in __all__) # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def license(): """print license""" print (__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/connection.py000066400000000000000000000761431455552142400254570ustar00rootroot00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] import io import os import sys import socket import struct import time import tempfile import itertools try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import util from . import AuthenticationError, BufferTooShort from .context import reduction _ForkingPickler = reduction.ForkingPickler try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE except ImportError: if sys.platform == 'win32': raise _winapi = None # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... CONNECTION_TIMEOUT = 20. 
_mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return getattr(time,'monotonic',time.time)() + timeout def _check_timeout(t): return getattr(time,'monotonic',time.time)() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter)), dir="") else: raise ValueError('unrecognized family') def _validate_family(family): ''' Checks if the family is valid for the current environment. ''' if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) if sys.platform == 'win32' and family == 'AF_UNIX': # double check if not hasattr(socket, family): raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str or util.is_abstract_socket_namespace(address): return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _ConnectionBase: _handle = None def __init__(self, handle, readable=True, writable=True): handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) # HACK for byte-indexing of non-bytewise buffers (e.g. 
array.array) if m.itemsize > 1: m = memoryview(bytes(m)) n = len(m) if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(_ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. """ self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[offset // itemsize : (offset + size) // itemsize]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return _ForkingPickler.loads(buf.getbuffer()) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. 
""" _got_empty_message = False def _close(self, _CloseHandle=_winapi.CloseHandle): _CloseHandle(self._handle) def _send_bytes(self, buf): ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nwritten, err = ov.GetOverlappedResult(True) assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile(self._handle, bsize, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). """ if _winapi: def _close(self, _close=_multiprocessing.closesocket): _close(self._handle) _write = _multiprocessing.send _read = _multiprocessing.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: n = write(self._handle, buf) remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: chunk = read(handle, remaining) n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf): n = len(buf) if n > 0x7fffffff: pre_header = struct.pack("!i", -1) header = struct.pack("!Q", n) self._send(pre_header) self._send(header) self._send(buf) else: # For wire compatibility with 3.7 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. 
self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if size == -1: buf = self._recv(8) size, = struct.unpack("!Q", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. ''' listener = self._listener if listener is not None: self._listener = None listener.close() @property def address(self): return self._listener._address @property def last_accepted(self): return self._listener._last_accepted def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = Connection(s1.detach()) c2 = Connection(s2.detach()) else: fd1, fd2 = os.pipe() c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = 
overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): # Linux abstract socket namespaces do not need to be explicitly unlinked self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() s.setblocking(True) return Connection(s.detach()) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) with socket.socket( getattr(socket, family) ) as s: s.setblocking(True) s.connect(address) return Connection(s.detach()) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: res = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = b'#CHALLENGE#' WELCOME = b'#WELCOME#' FAILURE = b'#FAILURE#' def deliver_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message, 'md5').digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message, 'md5').digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signalled. (Only # returning the first signalled might create starvation issues.) 
L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res+1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: ov, err = None, e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed... if ov and sys.getwindowsversion()[:2] >= (6, 2): # ... except on Windows 8 and later, where # the message HAS been consumed. try: _, err = ov.GetOverlappedResult(False) except OSError as e: err = e.winerror if not err and hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [o for o in object_list if o in ready_objects] else: import selectors # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). if hasattr(selectors, 'PollSelector'): _WaitSelector = selectors.PollSelector else: _WaitSelector = selectors.SelectSelector def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' with _WaitSelector() as selector: for obj in object_list: selector.register(obj, selectors.EVENT_READ) if timeout is not None: deadline = getattr(time,'monotonic',time.time)() + timeout while True: ready = selector.select(timeout) if ready: return [key.fileobj for (key, events) in ready] else: if timeout is not None: timeout = deadline - getattr(time,'monotonic',time.time)() if timeout < 0: return ready # # Make connection and socket objects sharable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . 
import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(sock.detach(), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): handle = dh.detach() return PipeConnection(handle, readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): fd = df.detach() return Connection(fd, readable, writable) reduction.register(Connection, reduce_connection) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/context.py000066400000000000000000000260061455552142400247750ustar00rootroot00000000000000import os import sys import threading from . import process from . import reduction __all__ = () # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError current_process = staticmethod(process.current_process) parent_process = staticmethod(process.parent_process) active_children = staticmethod(process.active_children) def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, lock=True): '''Returns a synchronized shared object''' from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *, lock=True): '''Returns a synchronized shared array''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . 
import connection def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. ''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) from None ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property def reducer(self): '''Controls how objects will be reduced to a form that can be shared with other processes.''' return globals().get('reduction') @reducer.setter def reducer(self, reduction): globals()['reduction'] = reduction def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super().get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): if sys.platform == 'win32': return ['spawn'] else: methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] if reduction.HAVE_SEND_HANDLE: methods.append('forkserver') return methods # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): if not reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } if sys.platform == 'darwin': # bpo-33725: running 
arbitrary code after fork() is no longer reliable # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn else: _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/dummy/000077500000000000000000000000001455552142400240665ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/dummy/__init__.py000066400000000000000000000057651455552142400262140ustar00rootroot00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): if self._parent is not current_process(): raise RuntimeError( "Parent is {0!r} but current_process is {1!r}".format( self._parent, current_process())) self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def 
__init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/dummy/connection.py000066400000000000000000000030761455552142400266050ustar00rootroot00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe' ] from queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None @property def address(self): return self._backlog_queue def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False with self._in.not_empty: self._in.not_empty.wait(timeout) return self._in.qsize() > 0 def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/forkserver.py000066400000000000000000000275521455552142400255100ustar00rootroot00000000000000import errno import os import selectors import signal import socket import struct import sys import threading import warnings from . import connection from . import process from .context import reduction from . import resource_tracker from . import spawn from . 
import util __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def _stop(self): # Method used by unit tests to stop the server with self._lock: self._stop_unlocked() def _stop_unlocked(self): if self._forkserver_pid is None: return # close the "alive" file descriptor asks the server to stop os.close(self._forkserver_alive_fd) self._forkserver_alive_fd = None os.waitpid(self._forkserver_pid, 0) self._forkserver_pid = None if not util.is_abstract_socket_namespace(self._forkserver_address): os.unlink(self._forkserver_address) self._forkserver_address = None def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in self._preload_modules): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. ''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, resource_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. ''' with self._lock: resource_tracker.ensure_running() if self._forkserver_pid is not None: # forkserver was launched before, is it still running? pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) if not pid: # still alive return # dead, launch it again os.close(self._forkserver_alive_fd) self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None cmd = ('from multiprocess.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = {x: y for x, y in data.items() if x in desired_keys} else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) if not util.is_abstract_socket_namespace(address): os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. 
alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] pid = util.spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w self._forkserver_pid = pid # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass util._close_stdin() sig_r, sig_w = os.pipe() os.set_blocking(sig_r, False) os.set_blocking(sig_w, False) def sigchld_handler(*_unused): # Dummy signal handler, doesn't do anything pass handlers = { # unblocking SIGCHLD allows the wakeup fd to notify our event loop signal.SIGCHLD: sigchld_handler, # protect the process from ^C signal.SIGINT: signal.SIG_IGN, } old_handlers = {sig: signal.signal(sig, val) for (sig, val) in handlers.items()} # calling os.write() in the Python signal handler is racy signal.set_wakeup_fd(sig_w) # map child pids to client fds pid_to_fd = {} with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) selector.register(sig_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: # EOF because no more client processes left assert os.read(alive_r, 1) == b'', "Not at EOF?" 
raise SystemExit if sig_r in rfds: # Got SIGCHLD os.read(sig_r, 65536) # exhaust while True: # Scan for child processes try: pid, sts = os.waitpid(-1, os.WNOHANG) except ChildProcessError: break if pid == 0: break child_w = pid_to_fd.pop(pid, None) if child_w is not None: returncode = os.waitstatus_to_exitcode(sts) # Send exit code to client process try: write_signed(child_w, returncode) except BrokenPipeError: # client vanished pass os.close(child_w) else: # This shouldn't happen really warnings.warn('forkserver: waitpid returned ' 'unexpected pid %d' % pid) if listener in rfds: # Incoming fork request with listener.accept()[0] as s: # Receive fds from client fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) if len(fds) > MAXFDS_TO_SEND: raise RuntimeError( "Too many ({0:n}) fds to send".format( len(fds))) child_r, child_w, *fds = fds s.close() pid = os.fork() if pid == 0: # Child code = 1 try: listener.close() selector.close() unused_fds = [alive_r, child_w, sig_r, sig_w] unused_fds.extend(pid_to_fd.values()) code = _serve_one(child_r, fds, unused_fds, old_handlers) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) else: # Send pid to client process try: write_signed(child_w, pid) except BrokenPipeError: # client vanished pass pid_to_fd[pid] = child_w os.close(child_r) for fd in fds: os.close(fd) except OSError as e: if e.errno != errno.ECONNABORTED: raise def _serve_one(child_r, fds, unused_fds, handlers): # close unnecessary stuff and reset signal handlers signal.set_wakeup_fd(-1) for sig, val in handlers.items(): signal.signal(sig, val) for fd in unused_fds: os.close(fd) (_forkserver._forkserver_alive_fd, resource_tracker._resource_tracker._fd, *_forkserver._inherited_fds) = fds # Run process object received over pipe parent_sentinel = os.dup(child_r) code = spawn._main(child_r, parent_sentinel) return code # # Read and write signed numbers # def read_signed(fd): data = b'' length = SIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return SIGNED_STRUCT.unpack(data)[0] def write_signed(fd, n): msg = SIGNED_STRUCT.pack(n) while msg: nbytes = os.write(fd, msg) if nbytes == 0: raise RuntimeError('should not get here') msg = msg[nbytes:] # # # _forkserver = ForkServer() ensure_running = _forkserver.ensure_running get_inherited_fds = _forkserver.get_inherited_fds connect_to_new_process = _forkserver.connect_to_new_process set_forkserver_preload = _forkserver.set_forkserver_preload uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/heap.py000066400000000000000000000265521455552142400242340ustar00rootroot00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import bisect from collections import defaultdict import mmap import os import sys import tempfile import threading from .context import reduction, assert_spawning from . import util __all__ = ['BufferWrapper'] # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': import _winapi class Arena(object): """ A shared memory area backed by anonymous memory (Windows). 
""" _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if _winapi.GetLastError() == 0: break # We have reopened a preexisting mmap. buf.close() else: raise FileExistsError('Cannot find name for new mmap') self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state # Reopen existing mmap self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. See issue 23060 #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS else: class Arena(object): """ A shared memory area backed by a temporary file (POSIX). """ if sys.platform == 'linux': _dir_candidates = ['/dev/shm'] else: _dir_candidates = [] def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: # Arena is created anew (if fd != -1, it means we're coming # from rebuild_arena() below) self.fd, name = tempfile.mkstemp( prefix='pym-%d-'%os.getpid(), dir=self._choose_dir(size)) os.unlink(name) util.Finalize(self, os.close, (self.fd,)) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def _choose_dir(self, size): # Choose a non-storage backed directory if possible, # to improve performance for d in self._dir_candidates: st = os.statvfs(d) if st.f_bavail * st.f_frsize >= size: # enough free space? return d return util.get_temp_dir() def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because ' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): # Minimum malloc() alignment _alignment = 8 _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() # Current arena allocation size self._size = size # A sorted list of available block sizes in arenas self._lengths = [] # Free block management: # - map each block size to a list of `(Arena, start, stop)` blocks self._len_to_seq = {} # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block # starting at that offset self._start_to_block = {} # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block # ending at that offset self._stop_to_block = {} # Map arenas to their `(Arena, start, stop)` blocks in use self._allocated_blocks = defaultdict(set) self._arenas = [] # List of pending blocks to free - see comment in free() below self._pending_free_blocks = [] # Statistics self._n_mallocs = 0 self._n_frees = 0 @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _new_arena(self, size): # Create a new arena with at least the given *size* length = self._roundup(max(self._size, size), mmap.PAGESIZE) # We carve larger and larger arenas, for efficiency, until we # reach a large-ish size (roughly L3 cache-sized) if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) def 
_discard_arena(self, arena): # Possibly delete the given (unused) arena length = arena.size # Reusing an existing arena is faster than creating a new one, so # we only reclaim space if it's large enough. if length < self._DISCARD_FREE_SPACE_LARGER_THAN: return blocks = self._allocated_blocks.pop(arena) assert not blocks del self._start_to_block[(arena, 0)] del self._stop_to_block[(arena, length)] self._arenas.remove(arena) seq = self._len_to_seq[length] seq.remove((arena, 0, length)) if not seq: del self._len_to_seq[length] self._lengths.remove(length) def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): return self._new_arena(size) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _add_free_block(self, block): # make block available and try to merge with its neighbours in the arena (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _remove_allocated_block(self, block): arena, start, stop = block blocks = self._allocated_blocks[arena] blocks.remove((start, stop)) if not blocks: # Arena is entirely free, discard it from this process self._discard_arena(arena) def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held. while True: try: block = self._pending_free_blocks.pop() except IndexError: break self._add_free_block(block) self._remove_allocated_block(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
if os.getpid() != self._lastpid: raise ValueError( "My pid ({0:n}) is not last pid {1:n}".format( os.getpid(),self._lastpid)) if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._n_frees += 1 self._free_pending_blocks() self._add_free_block(block) self._remove_allocated_block(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._n_mallocs += 1 # allow pending blocks to be marked available self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) real_stop = start + size if real_stop < stop: # if the returned block is larger than necessary, mark # the remainder available self._add_free_block((arena, real_stop, stop)) self._allocated_blocks[arena].add((start, real_stop)) return (arena, start, real_stop) # # Class wrapping a block allocated out of a Heap -- can be inherited by child process # class BufferWrapper(object): _heap = Heap() def __init__(self, size): if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start+size] uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/managers.py000066400000000000000000001344031455552142400251070ustar00rootroot00000000000000# # Module providing manager classes for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] # # Imports # import sys import threading import signal import array import queue import time import types import os from os import getpid from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen, ProcessError from . import pool from . import process from . import util from . import get_context try: from . 
import shared_memory except ImportError: HAS_SHMEM = False else: HAS_SHMEM = True __all__.append('SharedMemoryManager') # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] if view_types[0] is not list: # only needed in Py3.0 def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) # # Type for identifying shared objects # class Token(object): ''' Type to uniquely identify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): if not isinstance(result, str): raise TypeError( "Result {0!r} (kind '{1}') type is {2}, not str".format( result, kind, type(result))) if kind == '#UNSERIALIZABLE': return RemoteError('Unserializable message: %s\n' % result) else: return RemoteError(result) else: return ValueError('Unrecognized message type {!r}'.format(kind)) class RemoteError(Exception): def __str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): if not isinstance(authkey, bytes): raise TypeError( "Authkey {0!r} is type {1!s}, not bytes".format( authkey, type(authkey))) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if 
sys.stdout != sys.__stdout__: # what about stderr? util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def handle_request(self, c): ''' Handle a new connection ''' funcname = result = request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) c.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = \ self.id_to_local_proxy_obj[ident] except KeyError: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' # Perhaps include debug info about 'c'? 
with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' return len(self.id_to_refcount) def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(self, c, typeid, /, *args, **kwds): ''' Create a new shared object and return its id ''' with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: if kwds or (len(args) != 1): raise ValueError( "Without callable, must have one non-keyword argument") obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: if not isinstance(method_to_typeid, dict): raise TypeError( "Method_to_typeid {0!r}: type {1!s}, not dict".format( method_to_typeid, type(method_to_typeid))) exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: # If no external references exist but an internal (to the # manager) still does and a new external reference is created # from it, restore the manager's tracking of it from the # previously stashed internal ref. if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = \ self.id_to_local_proxy_obj[ident] obj, exposed, gettypeid = self.id_to_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if ident not in self.id_to_refcount and \ ident in self.id_to_local_proxy_obj: util.debug('Server DECREF skipping %r', ident) return with self.mutex: if self.id_to_refcount[ident] <= 0: raise AssertionError( "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( ident, self.id_to_obj[ident], self.id_to_refcount[ident])) self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: # Two-step process in case the object turns out to contain other # proxy objects (e.g. a managed list of managed lists). # Otherwise, deleting self.id_to_obj[ident] would trigger the # deleting of the stored value (another managed object) which would # in turn attempt to acquire the mutex that is already held here. 
self.id_to_obj[ident] = (None, (), None) # thread-safe util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { #XXX: register dill? 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' # bpo-36368: protect server process from KeyboardInterrupt signals signal.signal(signal.SIGINT, signal.SIG_IGN) if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', 
server.address) server.serve_forever() def _create(self, typeid, /, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() if self._state.value != State.STARTED: if self._state.value == State.INITIAL: raise ProcessError("Unable to start server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass @property def address(self): return self._address @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): # isinstance? 
assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, /, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] # Should be set to True only when a proxy object is being created # on the manager server; primary use case: nested proxy objects. # RebuildProxy detects when a proxy is being created on the manager # and sets this value appropriately. 
self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... 
decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = \ server.id_to_obj[token.id] incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, /, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref, manager_owned=manager_owned) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = 
typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): _exposed_ = ('__next__', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self, n=1): return self._callmethod('notify', (n,)) def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) __class_getitem__ = 
classmethod(types.GenericAlias) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) DictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__' )) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocess.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) # # Definition of SharedMemoryManager and SharedMemoryServer # if HAS_SHMEM: class _SharedMemoryTracker: "Manages one or more shared memory segments." def __init__(self, name, segment_names=[]): self.shared_memory_context_name = name self.segment_names = segment_names def register_segment(self, segment_name): "Adds the supplied shared memory block name to tracker." 
util.debug(f"Register segment {segment_name!r} in pid {getpid()}") self.segment_names.append(segment_name) def destroy_segment(self, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the list of blocks being tracked.""" util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") self.segment_names.remove(segment_name) segment = shared_memory.SharedMemory(segment_name) segment.close() segment.unlink() def unlink(self): "Calls destroy_segment() on all tracked shared memory blocks." for segment_name in self.segment_names[:]: self.destroy_segment(segment_name) def __del__(self): util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") self.unlink() def __getstate__(self): return (self.shared_memory_context_name, self.segment_names) def __setstate__(self, state): self.__init__(*state) class SharedMemoryServer(Server): public = Server.public + \ ['track_segment', 'release_segment', 'list_segments'] def __init__(self, *args, **kwargs): Server.__init__(self, *args, **kwargs) address = self.address # The address of Linux abstract namespaces can be bytes if isinstance(address, bytes): address = os.fsdecode(address) self.shared_memory_context = \ _SharedMemoryTracker(f"shm_{address}_{getpid()}") util.debug(f"SharedMemoryServer started by pid {getpid()}") def create(self, c, typeid, /, *args, **kwargs): """Create a new distributed-shared object (not backed by a shared memory block) and return its id to be used in a Proxy Object.""" # Unless set up as a shared proxy, don't make shared_memory_context # a standard part of kwargs. This makes things easier for supplying # simple functions. if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): kwargs['shared_memory_context'] = self.shared_memory_context return Server.create(self, c, typeid, *args, **kwargs) def shutdown(self, c): "Call unlink() on all tracked shared memory, terminate the Server." self.shared_memory_context.unlink() return Server.shutdown(self, c) def track_segment(self, c, segment_name): "Adds the supplied shared memory block name to Server's tracker." self.shared_memory_context.register_segment(segment_name) def release_segment(self, c, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the tracker instance inside the Server.""" self.shared_memory_context.destroy_segment(segment_name) def list_segments(self, c): """Returns a list of names of shared memory blocks that the Server is currently tracking.""" return self.shared_memory_context.segment_names class SharedMemoryManager(BaseManager): """Like SyncManager but uses SharedMemoryServer instead of Server. It provides methods for creating and returning SharedMemory instances and for creating a list-like object (ShareableList) backed by shared memory. It also provides methods that create and return Proxy Objects that support synchronization across processes (i.e. multi-process-safe locks and semaphores). """ _Server = SharedMemoryServer def __init__(self, *args, **kwargs): if os.name == "posix": # bpo-36867: Ensure the resource_tracker is running before # launching the manager process, so that concurrent # shared_memory manipulation both in the manager and in the # current process does not create two resource_tracker # processes. from . 
import resource_tracker resource_tracker.ensure_running() BaseManager.__init__(self, *args, **kwargs) util.debug(f"{self.__class__.__name__} created by pid {getpid()}") def __del__(self): util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") pass def get_server(self): 'Better than monkeypatching for now; merge into Server ultimately' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started SharedMemoryServer") elif self._state.value == State.SHUTDOWN: raise ProcessError("SharedMemoryManager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self._Server(self._registry, self._address, self._authkey, self._serializer) def SharedMemory(self, size): """Returns a new SharedMemory instance with the specified size in bytes, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sms = shared_memory.SharedMemory(None, create=True, size=size) try: dispatch(conn, None, 'track_segment', (sms.name,)) except BaseException as e: sms.unlink() raise e return sms def ShareableList(self, sequence): """Returns a new ShareableList instance populated with the values from the input sequence, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sl = shared_memory.ShareableList(sequence) try: dispatch(conn, None, 'track_segment', (sl.shm.name,)) except BaseException as e: sl.shm.unlink() raise e return sl uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/pool.py000066400000000000000000000774531455552142400242760ustar00rootroot00000000000000# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import collections import itertools import os import queue import threading import time import traceback import types import warnings # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError from .connection import wait # # Constants representing the state of a pool # INIT = "INIT" RUN = "RUN" CLOSE = "CLOSE" TERMINATE = "TERMINATE" # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): if (maxtasks is not None) and not (isinstance(maxtasks, int) and maxtasks >= 1): raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception and func is not _helper_reraises_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 util.debug('worker exiting after %d tasks' % completed) def _helper_reraises_exception(ex): 'Pickle-able helper function for use by _guarded_task_generation.' raise ex # # Class representing a process pool # class _PoolCache(dict): """ Class that implements a cache for the Pool class that will notify the pool management threads every time the cache is emptied. The notification is done by the use of a queue that is provided when instantiating the cache. """ def __init__(self, /, *args, notifier=None, **kwds): self.notifier = notifier super().__init__(*args, **kwds) def __delitem__(self, item): super().__delitem__(item) # Notify that the cache is empty. This is important because the # pool keeps maintaining workers until the cache gets drained. This # eliminates a race condition in which a task is finished after the # the pool's _handle_workers method has enter another iteration of the # loop. In this situation, the only event that can wake up the pool # is the cache to be emptied (no more tasks available). if not self: self.notifier.put(None) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' _wrap_exception = True @staticmethod def Process(ctx, *args, **kwds): return ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): # Attributes initialized early to make sure that they exist in # __del__() if __init__() raises an exception self._pool = [] self._state = INIT self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.SimpleQueue() # The _change_notifier queue exist to wake up self._handle_workers() # when the cache (self._cache) is empty or when there is a change in # the _state variable of the thread that runs _handle_workers. 
self._change_notifier = self._ctx.SimpleQueue() self._cache = _PoolCache(notifier=self._change_notifier) self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes try: self._repopulate_pool() except Exception: for p in self._pool: if p.exitcode is None: p.terminate() for p in self._pool: p.join() raise sentinels = self._get_sentinels() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self._cache, self._taskqueue, self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception, sentinels, self._change_notifier) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._change_notifier, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) self._state = RUN # Copy globals as function locals to make sure that they are available # during Python shutdown when the Pool is destroyed. def __del__(self, _warn=warnings.warn, RUN=RUN): if self._state == RUN: _warn(f"unclosed running multiprocessing pool {self!r}", ResourceWarning, source=self) if getattr(self, '_change_notifier', None) is not None: self._change_notifier.put(None) def __repr__(self): cls = self.__class__ return (f'<{cls.__module__}.{cls.__qualname__} ' f'state={self._state} ' f'pool_size={len(self._pool)}>') def _get_sentinels(self): task_queue_sentinels = [self._outqueue._reader] self_notifier_sentinels = [self._change_notifier._reader] return [*task_queue_sentinels, *self_notifier_sentinels] @staticmethod def _get_worker_sentinels(workers): return [worker.sentinel for worker in workers if hasattr(worker, "sentinel")] @staticmethod def _join_exited_workers(pool): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. """ cleaned = False for i in reversed(range(len(pool))): worker = pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del pool[i] return cleaned def _repopulate_pool(self): return self._repopulate_pool_static(self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) @staticmethod def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. 
""" for i in range(processes - len(pool)): w = Process(ctx, target=worker, args=(inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() pool.append(w) util.debug('added worker') @staticmethod def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Clean up any exited workers and start replacements for them. """ if Pool._join_exited_workers(pool): Pool._repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _check_running(self): if self._state != RUN: raise ValueError("Pool not running") def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. Pool must be running. ''' return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def _guarded_task_generation(self, result_job, func, iterable): '''Provides a generator of tasks for imap and imap_unordered with appropriate handling for iterables which throw exceptions during iteration.''' try: i = -1 for i, x in enumerate(iterable): yield (result_job, i, func, (x,), {}) except Exception as e: yield (result_job, i+1, _helper_reraises_exception, (e,), {}) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. ''' self._check_running() if chunksize == 1: result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0:n}".format( chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. 
''' self._check_running() if chunksize == 1: result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0!r}".format(chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' self._check_running() result = ApplyResult(self, callback, error_callback) self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' self._check_running() if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapper, task_batches), None ) ) return result @staticmethod def _wait_for_updates(sentinels, change_notifier, timeout=None): wait(sentinels, timeout=timeout) while not change_notifier.empty(): change_notifier.get() @classmethod def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception, sentinels, change_notifier): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
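# A small usage sketch for the imap()/imap_unordered() methods above (assuming
# `multiprocess` is installed): imap_unordered yields results in completion
# order rather than submission order.
from multiprocess import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    with Pool(4) as pool:
        for value in pool.imap_unordered(square, range(8), chunksize=2):
            print(value)   # e.g. 0 1 4 9 ... in whatever order chunks finish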
while thread._state == RUN or (cache and thread._state != TERMINATE): cls._maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] cls._wait_for_updates(current_sentinels, change_notifier) # send sentinel to stop workers taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None try: # iterating taskseq cannot fail for task in taskseq: if thread._state != RUN: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, idx = task[:2] try: cache[job]._set(idx, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') idx = task[1] if task else -1 set_length(idx + 1) continue break finally: task = taskseq = job = None else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state != RUN: assert thread._state == TERMINATE, "Thread not in TERMINATE" util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
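# Sketch (assuming `multiprocess`) of the path the handler threads above serve:
# the result handler delivers each value into its ApplyResult via _set(), which
# is what fires callbacks like the one passed here.
from multiprocess import Pool

def cube(x):
    return x ** 3

if __name__ == '__main__':
    with Pool(2) as pool:
        async_result = pool.apply_async(cube, (3,), callback=print)  # prints 27
        print(async_result.get(timeout=10))                          # 27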
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE self._change_notifier.put(None) def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') if self._state == RUN: raise ValueError("Pool is still running") elif self._state not in (CLOSE, TERMINATE): raise ValueError("In unknown state") self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') # Notify that the worker_handler state has been changed so the # _handle_workers loop can be unblocked (and exited) in order to # send the finalization sentinel all the workers. worker_handler._state = TERMINATE change_notifier.put(None) task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) if (not result_handler.is_alive()) and (len(cache) != 0): raise AssertionError( "Cannot have cache with result_hander not alive") result_handler._state = TERMINATE change_notifier.put(None) outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
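# Shutdown-protocol sketch matching close()/join()/terminate() above (assuming
# `multiprocess`): close() refuses new work, join() waits for the workers and
# handler threads, and terminate() (also used by __exit__) stops them without
# draining outstanding tasks.
from multiprocess import Pool

def work(x):
    return x + 1

if __name__ == '__main__':
    pool = Pool(2)
    results = pool.map(work, range(4))
    pool.close()    # no further tasks may be submitted
    pool.join()     # wait for worker processes to exit
    print(results)  # [1, 2, 3, 4]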
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): self._check_running() return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, pool, callback, error_callback): self._pool = pool self._event = threading.Event() self._job = next(job_counter) self._cache = pool._cache self._callback = callback self._error_callback = error_callback self._cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): if not self.ready(): raise ValueError("{0!r} not ready".format(self)) return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] self._pool = None __class_getitem__ = classmethod(types.GenericAlias) AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, pool, chunksize, length, callback, error_callback): ApplyResult.__init__(self, pool, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del self._cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): self._number_left -= 1 success, result = success_result if success and self._success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() self._pool = None else: if not success and self._success: # only store first exception self._success = False self._value = result if self._number_left == 0: # only consider the result ready once all jobs are done if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() self._pool = None # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, pool): self._pool = pool self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = pool._cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} self._cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise 
StopIteration from None raise TimeoutError from None success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] self._pool = None def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] self._pool = None # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] self._pool = None # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(ctx, *args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.SimpleQueue() self._outqueue = queue.SimpleQueue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _get_sentinels(self): return [self._change_notifier._reader] @staticmethod def _get_worker_sentinels(workers): return [] @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # drain inqueue, and put sentinels at its head to make workers finish try: while True: inqueue.get(block=False) except queue.Empty: pass for i in range(size): inqueue.put(None) def _wait_for_updates(self, sentinels, change_notifier, timeout): time.sleep(timeout) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/popen_fork.py000066400000000000000000000045061455552142400254540ustar00rootroot00000000000000import os import signal from . import util __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' def __init__(self, process_obj): util._flush_std_streams() self.returncode = None self.finalizer = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except OSError: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: self.returncode = os.waitstatus_to_exitcode(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from multiprocess.connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
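# Usage sketch for the ThreadPool subclass defined above (assuming
# `multiprocess`): it reuses the Pool machinery but runs workers as threads in
# the current process, so tasks and results are never pickled.
from multiprocess.pool import ThreadPool

def measure(text):
    return len(text)

if __name__ == '__main__':
    with ThreadPool(3) as tp:
        print(tp.map(measure, ["a", "bb", "ccc"]))  # [1, 2, 3]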
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def _send_signal(self, sig): if self.returncode is None: try: os.kill(self.pid, sig) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def terminate(self): self._send_signal(signal.SIGTERM) def kill(self): self._send_signal(signal.SIGKILL) def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) os.close(parent_w) code = process_obj._bootstrap(parent_sentinel=child_r) finally: os._exit(code) else: os.close(child_w) os.close(child_r) self.finalizer = util.Finalize(self, util.close_fds, (parent_r, parent_w,)) self.sentinel = parent_r def close(self): if self.finalizer is not None: self.finalizer() uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/popen_forkserver.py000066400000000000000000000042631455552142400267030ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen if not reduction.HAVE_SEND_HANDLE: raise ImportError('No support for sending fds between processes') from . import forkserver from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) # Keep a duplicate of the data pipe's write end as a sentinel of the # parent process used by the child process. _parent_w = os.dup(w) self.finalizer = util.Finalize(self, util.close_fds, (_parent_w, self.sentinel)) with open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_signed(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from multiprocess.connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_signed(self.sentinel) except (OSError, EOFError): # This should not happen usually, but perhaps the forkserver # process itself got killed self.returncode = 255 return self.returncode uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/popen_spawn_posix.py000066400000000000000000000037551455552142400270720ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): from . 
import resource_tracker tracker_fd = resource_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds) self.sentinel = parent_r with open(parent_w, 'wb', closefd=False) as f: f.write(fp.getbuffer()) finally: fds_to_close = [] for fd in (parent_r, parent_w): if fd is not None: fds_to_close.append(fd) self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) for fd in (child_r, child_w): if fd is not None: os.close(fd) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/popen_spawn_win32.py000066400000000000000000000076531455552142400266730ustar00rootroot00000000000000import os import msvcrt import signal import sys import _winapi from .context import reduction, get_spawning_popen, set_spawning_popen from . import spawn from . import util __all__ = ['Popen'] # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") def _path_eq(p1, p2): return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) WINENV = not _path_eq(sys.executable, sys._base_executable) def _close_handles(*handles): for handle in handles: _winapi.CloseHandle(handle) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' def __init__(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be duplicated by the child process # -- see spawn_main() in spawn.py. # # bpo-33929: Previously, the read end of pipe was "stolen" by the child # process, but it leaked a handle if the child process had been # terminated before it could steal the handle from the parent process. rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) cmd = ' '.join('"%s"' % x for x in cmd) python_exe = spawn.get_executable() # bpo-35797: When running in a venv, we bypass the redirect # executor and launch our base Python. 
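# Sketch (assuming `multiprocess`) of how the Popen implementations above are
# selected in practice: choosing a start method via get_context() picks the
# fork, forkserver, or spawn machinery behind the scenes.
import multiprocess as mp

def greet(name):
    print('hello', name)

if __name__ == '__main__':
    ctx = mp.get_context('spawn')   # 'fork' and 'forkserver' also exist on Unix
    p = ctx.Process(target=greet, args=('world',))
    p.start()
    p.join()
    print(p.exitcode)               # 0 on success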
if WINENV and _path_eq(python_exe, sys.executable): python_exe = sys._base_executable env = os.environ.copy() env["__PYVENV_LAUNCHER__"] = sys.executable else: env = None with open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = _winapi.CreateProcess( python_exe, cmd, None, None, False, 0, env, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) self.finalizer = util.Finalize(self, _close_handles, (self.sentinel, int(rhandle))) # send information to child set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: set_spawning_popen(None) def duplicate_for_child(self, handle): assert self is get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except OSError: if self.wait(timeout=1.0) is None: raise kill = terminate def close(self): self.finalizer() uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/process.py000066400000000000000000000273321455552142400247720ustar00rootroot00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
# __all__ = ['BaseProcess', 'current_process', 'active_children', 'parent_process'] # # Imports # import os import sys import signal import itertools import threading from _weakrefset import WeakSet # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_children) def parent_process(): ''' Return process object representing the parent process ''' return _parent_process # # # def _cleanup(): # check for processes which have finished for p in list(_children): if p._popen.poll() is not None: _children.discard(p) # # The `Process` class # class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analogous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count,) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._parent_name = _current_process.name self._popen = None self._closed = False self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) if daemon is not None: self.daemon = daemon _dangling.add(self) def _check_closed(self): if self._closed: raise ValueError("process object is closed") def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' self._check_closed() assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._check_closed() self._popen.terminate() def kill(self): ''' Terminate process; sends SIGKILL signal or uses TerminateProcess() ''' self._check_closed() self._popen.kill() def join(self, timeout=None): ''' Wait until child process terminates ''' self._check_closed() assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) def is_alive(self): ''' Return whether process is alive ''' self._check_closed() if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _children.discard(self) return False def close(self): ''' Close the Process object. This method releases resources held by the Process object. 
It is an error to call this method if the child process is still running. ''' if self._popen is not None: if self._popen.poll() is None: raise ValueError("Cannot close a process while it is still running. " "You should first call join() or terminate().") self._popen.close() self._popen = None del self._sentinel _children.discard(self) self._closed = True @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, str), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' self._check_closed() if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' self._check_closed() if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None def __repr__(self): exitcode = None if self is _current_process: status = 'started' elif self._closed: status = 'closed' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: exitcode = self._popen.poll() if exitcode is not None: status = 'stopped' else: status = 'started' info = [type(self).__name__, 'name=%r' % self._name] if self._popen is not None: info.append('pid=%s' % self._popen.pid) info.append('parent=%s' % self._parent_pid) info.append(status) if exitcode is not None: exitcode = _exitcode_to_name.get(exitcode, exitcode) info.append('exitcode=%s' % exitcode) if self.daemon: info.append('daemon') return '<%s>' % ' '.join(info) ## def _bootstrap(self, parent_sentinel=None): from . 
import util, context global _current_process, _parent_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() util._close_stdin() old_process = _current_process _current_process = self _parent_process = _ParentProcess( self._parent_name, self._parent_pid, parent_sentinel) if threading._HAVE_THREAD_NATIVE_ID: threading.main_thread()._set_native_id() try: util._finalizer_registry.clear() util._run_after_forkers() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as e: if e.code is None: exitcode = 0 elif isinstance(e.code, int): exitcode = e.code else: sys.stderr.write(str(e.code) + '\n') exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: threading._shutdown() util.info('process exiting with exitcode %d' % exitcode) util._flush_std_streams() return exitcode # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the parent process # class _ParentProcess(BaseProcess): def __init__(self, name, pid, sentinel): self._identity = () self._name = name self._pid = pid self._parent_pid = None self._popen = None self._closed = False self._sentinel = sentinel self._config = {} def is_alive(self): from multiprocess.connection import wait return not wait([self._sentinel], timeout=0) @property def ident(self): return self._pid def join(self, timeout=None): ''' Wait until parent process terminates ''' from multiprocess.connection import wait wait([self._sentinel], timeout=timeout) pid = ident # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._closed = False self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} # Note that some versions of FreeBSD only allow named # semaphores to have names of up to 14 characters. Therefore # we choose a short prefix. # # On MacOSX in a sandbox it may be necessary to use a # different prefix -- see #19478. # # Everything in self._config will be inherited by descendant # processes. def close(self): pass _parent_process = None _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in list(signal.__dict__.items()): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = f'-{name}' # For debug and leak testing _dangling = WeakSet() uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/queues.py000066400000000000000000000275531455552142400246300ustar00rootroot00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
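# Lifecycle sketch for the BaseProcess API above (assuming `multiprocess`):
# start() launches the child, join() reaps it, and close() releases the
# underlying resources once the child has stopped.
import multiprocess as mp

def task():
    return None

if __name__ == '__main__':
    p = mp.Process(target=task, name='worker-1')
    p.start()
    p.join()
    print(p.name, p.pid, p.exitcode)  # e.g. worker-1 12345 0
    p.close()                         # most further use of p now raises ValueError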
# __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import types import weakref import errno from queue import Empty, Full try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import connection from . import context _ForkingPickler = context.reduction.ForkingPickler from .util import debug, info, Finalize, register_after_fork, is_exiting # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0, *, ctx): if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._reset() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._reset() def _after_fork(self): debug('Queue._after_fork()') self._reset(after_fork=True) def _reset(self, after_fork=False): if after_fork: self._notempty._at_fork_reinit() else: self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = getattr(time,'monotonic',time.time)() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - getattr(time,'monotonic',time.time)() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed, "Queue {0!r} not closed".format(self) if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except 
AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._reader.close, self._writer.close, self._ignore_epipe, self._on_queue_feeder_error, self._sem), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, reader_close, writer_close, ignore_epipe, onerror, queue_sem): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') reader_close() writer_close() return # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as e: if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: # Since the object has not been sent in the queue, we need # to decrease the size of the queue. The error acts as # if the object had been silently removed from the queue # and this step is necessary to have a properly working # queue. queue_sem.release() onerror(e, obj) @staticmethod def _on_queue_feeder_error(e, obj): """ Private API hook called when feeding data in the background thread raises an exception. For overriding by concurrent.futures. """ import traceback traceback.print_exc() _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. 
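# Usage sketch for the feeder-thread Queue implemented above (assuming
# `multiprocess`): put() hands the object to the background feeder thread,
# which pickles it and writes it to the pipe; get() unpickles on the other end.
import multiprocess as mp

def producer(q):
    q.put({'payload': 42})

if __name__ == '__main__':
    q = mp.Queue(maxsize=8)
    p = mp.Process(target=producer, args=(q,))
    p.start()
    print(q.get(timeout=5))  # {'payload': 42}
    p.join()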
# class JoinableQueue(Queue): def __init__(self, maxsize=0, *, ctx): Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty, self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self, *, ctx): self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() def close(self): self._reader.close() self._writer.close() def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._poll = self._reader.poll def get(self): with self._rlock: res = self._reader.recv_bytes() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def put(self, obj): # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(obj) else: with self._wlock: self._writer.send_bytes(obj) __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/reduction.py000066400000000000000000000226451455552142400253120ustar00rootroot00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from abc import ABCMeta import copyreg import functools import io import os try: import dill as pickle except ImportError: import pickle import socket import sys from . 
import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocess.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None, *args, **kwds): buf = io.BytesIO() cls(buf, protocol, *args, **kwds).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None, *args, **kwds): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol, *args, **kwds).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False, *, source_process=None): '''Duplicate a handle. (target_process is a handle not a pid!)''' current_process = _winapi.GetCurrentProcess() if source_process is None: source_process = current_process if target_process is None: target_process = current_process return _winapi.DuplicateHandle( source_process, handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
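# Sketch of the ForkingPickler.register()/dumps() helpers above (assuming
# `multiprocess`; the Point class and its reducer here are made up purely for
# illustration).
from multiprocess.reduction import ForkingPickler

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def _reduce_point(p):
    return Point, (p.x, p.y)              # standard (callable, args) reduce value

ForkingPickler.register(Point, _reduce_point)
buf = ForkingPickler.dumps(Point(1, 2))   # bytes-like buffer
restored = ForkingPickler.loads(buf)
print(restored.x, restored.y)             # 1 2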
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) if len(a) % 256 != msg[0]: raise AssertionError( "Len is {0:n} but msg[0] is {1!r}".format( len(a), msg[0])) return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) class AbstractReducer(metaclass=ABCMeta): '''Abstract base class for use in implementing a Reduction class suitable for use in replacing the standard reduction mechanism used in multiprocess.''' ForkingPickler = ForkingPickler register = register dump = dump send_handle = send_handle recv_handle = recv_handle if sys.platform == 'win32': steal_handle = steal_handle duplicate = duplicate DupHandle = DupHandle else: sendfds = sendfds recvfds = recvfds DupFd = DupFd _reduce_method = _reduce_method _reduce_method_descriptor = _reduce_method_descriptor _rebuild_partial = _rebuild_partial _reduce_socket = _reduce_socket _rebuild_socket = _rebuild_socket def __init__(self, *args): register(type(_C().f), _reduce_method) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) register(functools.partial, _reduce_partial) register(socket.socket, _reduce_socket) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/resource_sharer.py000066400000000000000000000120141455552142400264760ustar00rootroot00000000000000# # We use a background thread for sharing fds on Unix, and for sharing sockets on # Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. # import os import signal import socket import sys import threading from . import process from .context import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. 
This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resources using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() self._lock._at_fork_reinit() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None, "Already have Listener" util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/resource_tracker.py000066400000000000000000000207701455552142400266550ustar00rootroot00000000000000############################################################################### # Server process to keep track of unlinked resources (like shared memory # segments, semaphores etc.) and clean them. # # On Unix we run a server process which keeps track of unlinked # resources. The server ignores SIGINT and SIGTERM and reads from a # pipe. 
Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining resource names. # # This is important because there may be system limits for such resources: for # instance, the system only supports a limited number of named semaphores, and # shared-memory segments live in the RAM. If a python process leaks such a # resource, this resource will not be removed till the next reboot. Without # this resource tracker process, "killall python" would probably leave unlinked # resources. import os import signal import sys import threading import warnings from . import spawn from . import util __all__ = ['ensure_running', 'register', 'unregister'] _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) _CLEANUP_FUNCS = { 'noop': lambda: None, } if os.name == 'posix': try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import _posixshmem _CLEANUP_FUNCS.update({ 'semaphore': _multiprocessing.sem_unlink, 'shared_memory': _posixshmem.shm_unlink, }) class ResourceTracker(object): def __init__(self): self._lock = threading.Lock() self._fd = None self._pid = None def _stop(self): with self._lock: if self._fd is None: # not running return # closing the "alive" file descriptor stops main() os.close(self._fd) self._fd = None os.waitpid(self._pid, 0) self._pid = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that resource tracker process is running. This can be run from any process. Usually a child process will use the resource created by its parent.''' with self._lock: if self._fd is not None: # resource tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) # Clean-up to avoid dangling processes. try: # _pid can be None if this process is a child from another # python process, which has started the resource_tracker. if self._pid is not None: os.waitpid(self._pid, 0) except ChildProcessError: # The resource_tracker has already been terminated. pass self._fd = None self._pid = None warnings.warn('resource_tracker: process died unexpectedly, ' 'relaunching. Some resources might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from multiprocess.resource_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] # bpo-33613: Register a signal mask that will block the signals. # This signal mask will be inherited by the child that is going # to be spawned and will protect the child from a race condition # that can make the child die before it registers signal handlers # for SIGINT and SIGTERM. The mask is unregistered after spawning # the child. try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = util.spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r) def _check_alive(self): '''Check that the pipe has not been closed by sending a probe.''' try: # We cannot use send here as it calls ensure_running, creating # a cycle. 
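# Added note (derived from _send() and main() below; the names shown are made
# up): every message on this pipe is a single ASCII line of the form
# b'CMD:name:rtype\n', for example
#     b'REGISTER:/psm_1a2b3c4d:shared_memory\n'
#     b'UNREGISTER:/psm_1a2b3c4d:shared_memory\n'
# The 'PROBE:0:noop' line carries no payload; the write below simply raises
# OSError once the tracker process (the read end of the pipe) has gone away,
# which is how this check detects a dead tracker.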
os.write(self._fd, b'PROBE:0:noop\n') except OSError: return False else: return True def register(self, name, rtype): '''Register name of resource with resource tracker.''' self._send('REGISTER', name, rtype) def unregister(self, name, rtype): '''Unregister name of resource with resource tracker.''' self._send('UNREGISTER', name, rtype) def _send(self, cmd, name, rtype): self.ensure_running() msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') if len(name) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('name too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( nbytes, len(msg)) _resource_tracker = ResourceTracker() ensure_running = _resource_tracker.ensure_running register = _resource_tracker.register unregister = _resource_tracker.unregister getfd = _resource_tracker.getfd def main(fd): '''Run resource tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} try: # keep track of registered/unregistered resources with open(fd, 'rb') as f: for line in f: try: cmd, name, rtype = line.strip().decode('ascii').split(':') cleanup_func = _CLEANUP_FUNCS.get(rtype, None) if cleanup_func is None: raise ValueError( f'Cannot register {name} for automatic cleanup: ' f'unknown resource type {rtype}') if cmd == 'REGISTER': cache[rtype].add(name) elif cmd == 'UNREGISTER': cache[rtype].remove(name) elif cmd == 'PROBE': pass else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining resources for rtype, rtype_cache in cache.items(): if rtype_cache: try: warnings.warn('resource_tracker: There appear to be %d ' 'leaked %s objects to clean up at shutdown' % (len(rtype_cache), rtype)) except Exception: pass for name in rtype_cache: # For some reason the process which created and registered this # resource has failed to unregister it. Presumably it has # died. We therefore unlink it. try: try: _CLEANUP_FUNCS[rtype](name) except Exception as e: warnings.warn('resource_tracker: %r: %s' % (name, e)) finally: pass uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/shared_memory.py000066400000000000000000000437341455552142400261560ustar00rootroot00000000000000"""Provides shared memory for direct access across processes. The API of this package is currently provisional. Refer to the documentation for details. """ __all__ = [ 'SharedMemory', 'ShareableList' ] from functools import partial import mmap import os import errno import struct import secrets import types if os.name == "nt": import _winapi _USE_POSIX = False else: import _posixshmem _USE_POSIX = True _O_CREX = os.O_CREAT | os.O_EXCL # FreeBSD (and perhaps other BSDs) limit names to 14 characters. _SHM_SAFE_NAME_LENGTH = 14 # Shared memory block name prefix if _USE_POSIX: _SHM_NAME_PREFIX = '/psm_' else: _SHM_NAME_PREFIX = 'wnsm_' def _make_filename(): "Create a random filename for the shared memory object." 
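# Worked example (added comment) of the computation just below: with the POSIX
# prefix '/psm_' (5 characters), nbytes = (14 - 5) // 2 = 4, so token_hex(4)
# appends 8 hex digits and a typical result is something like '/psm_1a2b3c4d'
# -- 13 characters, inside the 14-character safe limit defined above.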
# number of random bytes to use for name nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 assert nbytes >= 2, '_SHM_NAME_PREFIX too long' name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) assert len(name) <= _SHM_SAFE_NAME_LENGTH return name class SharedMemory: """Creates a new shared memory block or attaches to an existing shared memory block. Every shared memory block is assigned a unique name. This enables one process to create a shared memory block with a particular name so that a different process can attach to that same shared memory block using that same name. As a resource for sharing data across processes, shared memory blocks may outlive the original process that created them. When one process no longer needs access to a shared memory block that might still be needed by other processes, the close() method should be called. When a shared memory block is no longer needed by any process, the unlink() method should be called to ensure proper cleanup.""" # Defaults; enables close() and unlink() to run without errors. _name = None _fd = -1 _mmap = None _buf = None _flags = os.O_RDWR _mode = 0o600 _prepend_leading_slash = True if _USE_POSIX else False def __init__(self, name=None, create=False, size=0): if not size >= 0: raise ValueError("'size' must be a positive integer") if create: self._flags = _O_CREX | os.O_RDWR if size == 0: raise ValueError("'size' must be a positive number different from zero") if name is None and not self._flags & os.O_EXCL: raise ValueError("'name' can only be None if create=True") if _USE_POSIX: # POSIX Shared Memory if name is None: while True: name = _make_filename() try: self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) except FileExistsError: continue self._name = name break else: name = "/" + name if self._prepend_leading_slash else name self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) self._name = name try: if create and size: os.ftruncate(self._fd, size) stats = os.fstat(self._fd) size = stats.st_size self._mmap = mmap.mmap(self._fd, size) except OSError: self.unlink() raise from .resource_tracker import register register(self._name, "shared_memory") else: # Windows Named Shared Memory if create: while True: temp_name = _make_filename() if name is None else name # Create and reserve shared memory block with this name # until it can be attached to by mmap. h_map = _winapi.CreateFileMapping( _winapi.INVALID_HANDLE_VALUE, _winapi.NULL, _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF, size & 0xFFFFFFFF, temp_name ) try: last_error_code = _winapi.GetLastError() if last_error_code == _winapi.ERROR_ALREADY_EXISTS: if name is not None: raise FileExistsError( errno.EEXIST, os.strerror(errno.EEXIST), name, _winapi.ERROR_ALREADY_EXISTS ) else: continue self._mmap = mmap.mmap(-1, size, tagname=temp_name) finally: _winapi.CloseHandle(h_map) self._name = temp_name break else: self._name = name # Dynamically determine the existing named shared memory # block's size which is likely a multiple of mmap.PAGESIZE. 
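# Added note (illustrative): because the size is recovered from the mapping
# itself rather than stored separately, a block attached by name here may
# report a size rounded up to a multiple of mmap.PAGESIZE -- e.g. a block
# created with size=100 can show size=4096 when re-attached on a system with
# 4 KiB pages.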
h_map = _winapi.OpenFileMapping( _winapi.FILE_MAP_READ, False, name ) try: p_buf = _winapi.MapViewOfFile( h_map, _winapi.FILE_MAP_READ, 0, 0, 0 ) finally: _winapi.CloseHandle(h_map) size = _winapi.VirtualQuerySize(p_buf) self._mmap = mmap.mmap(-1, size, tagname=name) self._size = size self._buf = memoryview(self._mmap) def __del__(self): try: self.close() except OSError: pass def __reduce__(self): return ( self.__class__, ( self.name, False, self.size, ), ) def __repr__(self): return f'{self.__class__.__name__}({self.name!r}, size={self.size})' @property def buf(self): "A memoryview of contents of the shared memory block." return self._buf @property def name(self): "Unique name that identifies the shared memory block." reported_name = self._name if _USE_POSIX and self._prepend_leading_slash: if self._name.startswith("/"): reported_name = self._name[1:] return reported_name @property def size(self): "Size in bytes." return self._size def close(self): """Closes access to the shared memory from this instance but does not destroy the shared memory block.""" if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() self._mmap = None if _USE_POSIX and self._fd >= 0: os.close(self._fd) self._fd = -1 def unlink(self): """Requests that the underlying shared memory block be destroyed. In order to ensure proper cleanup of resources, unlink should be called once (and only once) across all processes which have access to the shared memory block.""" if _USE_POSIX and self._name: from .resource_tracker import unregister _posixshmem.shm_unlink(self._name) unregister(self._name, "shared_memory") _encoding = "utf8" class ShareableList: """Pattern for a mutable list-like object shareable via a shared memory block. It differs from the built-in list type in that these lists can not change their overall length (i.e. no append, insert, etc.) 
Because values are packed into a memoryview as bytes, the struct packing format for any storable value must require no more than 8 characters to describe its format.""" # The shared memory area is organized as follows: # - 8 bytes: number of items (N) as a 64-bit integer # - (N + 1) * 8 bytes: offsets of each element from the start of the # data area # - K bytes: the data area storing item values (with encoding and size # depending on their respective types) # - N * 8 bytes: `struct` format string for each element # - N bytes: index into _back_transforms_mapping for each element # (for reconstructing the corresponding Python value) _types_mapping = { int: "q", float: "d", bool: "xxxxxxx?", str: "%ds", bytes: "%ds", None.__class__: "xxxxxx?x", } _alignment = 8 _back_transforms_mapping = { 0: lambda value: value, # int, float, bool 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str 2: lambda value: value.rstrip(b'\x00'), # bytes 3: lambda _value: None, # None } @staticmethod def _extract_recreation_code(value): """Used in concert with _back_transforms_mapping to convert values into the appropriate Python objects when retrieving them from the list as well as when storing them.""" if not isinstance(value, (str, bytes, None.__class__)): return 0 elif isinstance(value, str): return 1 elif isinstance(value, bytes): return 2 else: return 3 # NoneType def __init__(self, sequence=None, *, name=None): if name is None or sequence is not None: sequence = sequence or () _formats = [ self._types_mapping[type(item)] if not isinstance(item, (str, bytes)) else self._types_mapping[type(item)] % ( self._alignment * (len(item) // self._alignment + 1), ) for item in sequence ] self._list_len = len(_formats) assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len offset = 0 # The offsets of each list element into the shared memory's # data area (0 meaning the start of the data area, not the start # of the shared memory area). self._allocated_offsets = [0] for fmt in _formats: offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) self._allocated_offsets.append(offset) _recreation_codes = [ self._extract_recreation_code(item) for item in sequence ] requested_size = struct.calcsize( "q" + self._format_size_metainfo + "".join(_formats) + self._format_packing_metainfo + self._format_back_transform_codes ) self.shm = SharedMemory(name, create=True, size=requested_size) else: self.shm = SharedMemory(name) if sequence is not None: _enc = _encoding struct.pack_into( "q" + self._format_size_metainfo, self.shm.buf, 0, self._list_len, *(self._allocated_offsets) ) struct.pack_into( "".join(_formats), self.shm.buf, self._offset_data_start, *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) ) struct.pack_into( self._format_packing_metainfo, self.shm.buf, self._offset_packing_formats, *(v.encode(_enc) for v in _formats) ) struct.pack_into( self._format_back_transform_codes, self.shm.buf, self._offset_back_transform_codes, *(_recreation_codes) ) else: self._list_len = len(self) # Obtains size from offset 0 in buffer. self._allocated_offsets = list( struct.unpack_from( self._format_size_metainfo, self.shm.buf, 1 * 8 ) ) def _get_packing_format(self, position): "Gets the packing format for a single value stored in the list." 
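# Added clarification: each item's format string occupies a fixed 8-byte,
# NUL-padded slot in the metadata area, so an int stored with format 'q' is
# kept as b'q\x00\x00\x00\x00\x00\x00\x00' and the unpack/rstrip below
# recovers the original 'q'.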
position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") v = struct.unpack_from( "8s", self.shm.buf, self._offset_packing_formats + position * 8 )[0] fmt = v.rstrip(b'\x00') fmt_as_str = fmt.decode(_encoding) return fmt_as_str def _get_back_transform(self, position): "Gets the back transformation function for a single value." if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") transform_code = struct.unpack_from( "b", self.shm.buf, self._offset_back_transform_codes + position )[0] transform_function = self._back_transforms_mapping[transform_code] return transform_function def _set_packing_format_and_transform(self, position, fmt_as_str, value): """Sets the packing format and back transformation code for a single value in the list at the specified position.""" if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") struct.pack_into( "8s", self.shm.buf, self._offset_packing_formats + position * 8, fmt_as_str.encode(_encoding) ) transform_code = self._extract_recreation_code(value) struct.pack_into( "b", self.shm.buf, self._offset_back_transform_codes + position, transform_code ) def __getitem__(self, position): position = position if position >= 0 else position + self._list_len try: offset = self._offset_data_start + self._allocated_offsets[position] (v,) = struct.unpack_from( self._get_packing_format(position), self.shm.buf, offset ) except IndexError: raise IndexError("index out of range") back_transform = self._get_back_transform(position) v = back_transform(v) return v def __setitem__(self, position, value): position = position if position >= 0 else position + self._list_len try: item_offset = self._allocated_offsets[position] offset = self._offset_data_start + item_offset current_format = self._get_packing_format(position) except IndexError: raise IndexError("assignment index out of range") if not isinstance(value, (str, bytes)): new_format = self._types_mapping[type(value)] encoded_value = value else: allocated_length = self._allocated_offsets[position + 1] - item_offset encoded_value = (value.encode(_encoding) if isinstance(value, str) else value) if len(encoded_value) > allocated_length: raise ValueError("bytes/str item exceeds available storage") if current_format[-1] == "s": new_format = current_format else: new_format = self._types_mapping[str] % ( allocated_length, ) self._set_packing_format_and_transform( position, new_format, value ) struct.pack_into(new_format, self.shm.buf, offset, encoded_value) def __reduce__(self): return partial(self.__class__, name=self.shm.name), () def __len__(self): return struct.unpack_from("q", self.shm.buf, 0)[0] def __repr__(self): return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' @property def format(self): "The struct packing format used by all currently stored items." return "".join( self._get_packing_format(i) for i in range(self._list_len) ) @property def _format_size_metainfo(self): "The struct packing format used for the items' storage offsets." return "q" * (self._list_len + 1) @property def _format_packing_metainfo(self): "The struct packing format used for the items' packing formats." return "8s" * self._list_len @property def _format_back_transform_codes(self): "The struct packing format used for the items' back transforms." 
return "b" * self._list_len @property def _offset_data_start(self): # - 8 bytes for the list length # - (N + 1) * 8 bytes for the element offsets return (self._list_len + 2) * 8 @property def _offset_packing_formats(self): return self._offset_data_start + self._allocated_offsets[-1] @property def _offset_back_transform_codes(self): return self._offset_packing_formats + self._list_len * 8 def count(self, value): "L.count(value) -> integer -- return number of occurrences of value." return sum(value == entry for entry in self) def index(self, value): """L.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present.""" for position, entry in enumerate(self): if value == entry: return position else: raise ValueError(f"{value!r} not in this container") __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/sharedctypes.py000066400000000000000000000142421455552142400260060ustar00rootroot00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import ctypes import weakref from . import heap from . import get_context from .context import reduction, assert_spawning _ForkingPickler = reduction.ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=True, ctx=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): ''' Return a synchronization wrapper for a RawArray ''' obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def 
synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = {name: make_property(name) for name in names} classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, ctx) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length _ForkingPickler.register(type_, reduce_ctype) buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/spawn.py000066400000000000000000000221151455552142400244360ustar00rootroot00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import sys import runpy import types from . import get_start_method, set_start_method from . import process from .context import reduction from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
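# Added example (hypothetical, values are illustrative): an application that
# embeds or freezes Python can point spawned children at a real interpreter
# with something like
#     multiprocess.set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
# using the set_executable() helper defined below; the exact path depends on
# the embedding application.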
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = getattr(sys, 'frozen', False) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe def get_executable(): return _python_exe # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--multiprocessing-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv), "Not forking" if sys.platform == 'win32': import msvcrt import _winapi if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid) else: source_process = None new_handle = reduction.duplicate(pipe_handle, source_process=source_process) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process else: from . import resource_tracker resource_tracker._resource_tracker._fd = tracker_fd fd = pipe_handle parent_sentinel = os.dup(pipe_handle) exitcode = _main(fd, parent_sentinel) sys.exit(exitcode) def _main(fd, parent_sentinel): with os.fdopen(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = reduction.pickle.load(from_parent) prepare(preparation_data) self = reduction.pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap(parent_sentinel) def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... 
The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable.''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path=sys.path.copy() try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] main_mod_name = getattr(main_module.__spec__, "name", None) if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. 
We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/synchronize.py000066400000000000000000000270651455552142400256720ustar00rootroot00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import sys import tempfile try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import time from . import context from . import process from . import util # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
# See issue 3770 try: from _multiprocess import SemLock, sem_unlink except ImportError: try: from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, *, ctx): if ctx is None: ctx = context._default_context.get_context() name = ctx.get_start_method() unlink_now = sys.platform == 'win32' or name == 'fork' for i in range(100): try: sl = self._semlock = _multiprocessing.SemLock( kind, value, maxvalue, self._make_name(), unlink_now) except FileExistsError: pass else: break else: raise FileExistsError('cannot find name for semaphore') util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if self._semlock.name is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .resource_tracker import register register(self._semlock.name, "semaphore") util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .resource_tracker import unregister sem_unlink(name) unregister(name, "semaphore") def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: h = sl.handle return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % \ (self.__class__.__name__, value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 
'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) # # Recursive lock # class RLock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None, *, ctx): self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def notify(self, n=1): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire( False), ('notify: Should not have been able to acquire ' + '_wait_semaphore') # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res, ('notify: Bug in sleeping_count.acquire' + '- res should not be False') sleepers = 0 while sleepers < n and self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def notify_all(self): self.notify(n=sys.maxsize) def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() 
return result # # Event # class Event(object): def __init__(self, *, ctx): self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False # # Barrier # class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, *, ctx): import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): self._array[1] = value uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/tests/000077500000000000000000000000001455552142400240755ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/tests/__init__.py000066400000000000000000005706161455552142400262250ustar00rootroot00000000000000# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import time import io import itertools import sys import os import gc import errno import signal import array import socket import random import logging import subprocess import struct import operator import pickle #XXX: use dill? import weakref import warnings import test.support import test.support.script_helper from test import support from test.support import hashlib_helper from test.support import socket_helper # Skip tests if _multiprocessing wasn't built. _multiprocessing = test.support.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. test.support.import_module('multiprocess.synchronize') import threading import multiprocess as multiprocessing import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocess import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None if hasattr(support,'check_sanitizer') and support.check_sanitizer(address=True): # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to # work around a libasan race condition: dead lock in pthread_create(). 
raise unittest.SkipTest("libasan has a pthread_create() dead lock") # Don't ignore user's installed packages ENV = dict(__cleanenv = False, __isolated = False) # Timeout to wait until a process completes #XXX: travis-ci TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused support.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocess import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocess.connection import wait def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." % nsems_min) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = getattr(time,'monotonic',time.time)() try: return self.func(*args, **kwds) finally: self.elapsed = getattr(time,'monotonic',time.time)() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. 
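# Added descriptive comment: __reduce__ below makes any accidental attempt to
# pickle a test case (e.g. handing one to a spawn-started worker) fail fast
# with NotImplementedError instead of failing in a harder-to-diagnose way.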
def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocess.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def _test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocess.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=support.SHORT_TIMEOUT) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. 
# We use alarm() to interrupt it if it blocks for too long. def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() travis = os.environ.get('COVERAGE') #XXX: travis-ci N = (1 if travis else 5) if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: p.start() for p in procs: join_process(p) for p in procs: 
self.assertEqual(p.exitcode, 0) procs = [self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.001) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() gc.collect() # For PyPy or other GCs. self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(test.support.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) @classmethod def _sleep_and_set_event(self, evt, delay=0.0): 
time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, 'r') as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("__init__.py", err) self.assertIn("1/0 # MARKER", err) @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, 1) with open(testfn, 'r') as f: content = f.read() 
self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) cases = [ ((True,), 1), ((False,), 0), ((8,), 8), ((None,), 0), ((), 0), ] for args, expected in cases: with self.subTest(args=args): p = self.Process(target=sys.exit, args=args) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, expected) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, 
TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with test.support.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w') as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with test.support.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = getattr(time,'monotonic',time.time)() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = getattr(time,'monotonic',time.time)() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once # failed because the delta was only 135.8 ms. self.assertGreaterEqual(delta, 0.100) close_queue(q) def test_queue_feeder_donot_stop_onexc(self): # bpo-30414: verify feeder handles exceptions correctly if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): def __reduce__(self): raise AttributeError with test.support.captured_stderr(): q = self.Queue() q.put(NotSerializable()) q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) close_queue(q) with test.support.captured_stderr(): # bpo-33078: verify that the queue size is correctly handled # on errors. 
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available on all platform as it
                # relies on sem_getvalue
                pass
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            # Check that the size of the queue is correct
            self.assertTrue(q.empty())
            close_queue(q)

    def test_queue_feeder_on_queue_feeder_error(self):
        # bpo-30006: verify feeder handles exceptions using the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            """Mock unserializable object"""
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False

            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError

        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with overloaded _on_queue_feeder_error hook"""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True

        not_serializable_obj = NotSerializable()
        # The captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)

            # Verify that q is still functioning correctly
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))

        # Assert that the serialization and the hook have been called correctly
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)

    def test_closed_queue_put_get_exceptions(self):
        for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
            q.close()
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.put('foo')
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.get()
#
#
#

class _TestLock(BaseTestCase):

    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        self.assertRaises((ValueError, threading.ThreadError), lock.release)

    def test_rlock(self):
        lock = self.RLock()
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)

    def test_lock_context(self):
        with self.Lock():
            pass


class _TestSemaphore(BaseTestCase):

    def _test_semaphore(self, sem):
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)

    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #
self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def assertReachesEventually(self, func, value): for i in range(10): try: if func() == value: break except NotImplementedError: break time.sleep(DELTA) time.sleep(DELTA) self.assertReturnsIfImplemented(value, func) def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() 
cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.1 dt = getattr(time,'monotonic',time.time)() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = getattr(time,'monotonic',time.time)() - dt # borrow logic in assertTimeout() from test/lock_tests.py if not result and expected * 0.6 < dt < expected * 10.0: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
for i in range(3): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertTrue(success.value) @classmethod def _test_wait_result(cls, c, pid): with c: c.notify() time.sleep(1) if pid is not None: os.kill(pid, signal.SIGINT) def test_wait_result(self): if isinstance(self, ProcessesMixin) and sys.platform != 'win32': pid = os.getpid() else: pid = None c = self.Condition() with c: self.assertFalse(c.wait(0)) self.assertFalse(c.wait(0.1)) p = self.Process(target=self._test_wait_result, args=(c, pid)) p.start() self.assertTrue(c.wait(60)) if pid is not None: self.assertRaises(KeyboardInterrupt, c.wait, 60) p.join() class _TestEvent(BaseTestCase): @classmethod def _test_event(cls, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporarily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) p = self.Process(target=self._test_event, args=(event,)) p.daemon = True p.start() self.assertEqual(wait(), True) p.join() # # Tests for Barrier - adapted from tests in test/lock_tests.py # # Many of the tests for threading.Barrier use a list as an atomic # counter: a value is appended to increment the counter, and the # length of the list gives the value. We use the class DummyList # for the same purpose. class _DummyList(object): def __init__(self): wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) lock = multiprocessing.Lock() self.__setstate__((wrapper, lock)) self._lengthbuf[0] = 0 def __setstate__(self, state): (self._wrapper, self._lock) = state self._lengthbuf = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._wrapper, self._lock) def append(self, _): with self._lock: self._lengthbuf[0] += 1 def __len__(self): with self._lock: return self._lengthbuf[0] def _wait(): # A crude wait/yield function not relying on synchronization primitives. time.sleep(0.01) class Bunch(object): """ A bunch of threads. """ def __init__(self, namespace, f, args, n, wait_before_exit=False): """ Construct a bunch of `n` threads running the same function `f`. If `wait_before_exit` is True, the threads won't terminate until do_finish() is called. 
""" self.f = f self.args = args self.n = n self.started = namespace.DummyList() self.finished = namespace.DummyList() self._can_exit = namespace.Event() if not wait_before_exit: self._can_exit.set() threads = [] for i in range(n): p = namespace.Process(target=self.task) p.daemon = True p.start() threads.append(p) def finalize(threads): for p in threads: p.join() self._finalizer = weakref.finalize(self, finalize, threads) def task(self): pid = os.getpid() self.started.append(pid) try: self.f(*self.args) finally: self.finished.append(pid) self._can_exit.wait(30) assert self._can_exit.is_set() def wait_for_started(self): while len(self.started) < self.n: _wait() def wait_for_finished(self): while len(self.finished) < self.n: _wait() def do_finish(self): self._can_exit.set() def close(self): self._finalizer() class AppendTrue(object): def __init__(self, obj): self.obj = obj def __call__(self): self.obj.append(True) class _TestBarrier(BaseTestCase): """ Tests for Barrier objects. """ N = 5 defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout def setUp(self): self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) def tearDown(self): self.barrier.abort() self.barrier = None def DummyList(self): if self.TYPE == 'threads': return [] elif self.TYPE == 'manager': return self.manager.list() else: return _DummyList() def run_threads(self, f, args): b = Bunch(self, f, args, self.N-1) try: f(*args) b.wait_for_finished() finally: b.close() @classmethod def multipass(cls, barrier, results, n): m = barrier.parties assert m == cls.N for i in range(n): results[0].append(True) assert len(results[1]) == i * m barrier.wait() results[1].append(True) assert len(results[0]) == (i + 1) * m barrier.wait() try: assert barrier.n_waiting == 0 except NotImplementedError: pass assert not barrier.broken def test_barrier(self, passes=1): """ Test that a barrier is passed in lockstep """ results = [self.DummyList(), self.DummyList()] self.run_threads(self.multipass, (self.barrier, results, passes)) def test_barrier_10(self): """ Test that a barrier works for 10 consecutive runs """ return self.test_barrier(10) @classmethod def _test_wait_return_f(cls, barrier, queue): res = barrier.wait() queue.put(res) def test_wait_return(self): """ test the return value from barrier.wait """ queue = self.Queue() self.run_threads(self._test_wait_return_f, (self.barrier, queue)) results = [queue.get() for i in range(self.N)] self.assertEqual(results.count(0), 1) close_queue(queue) @classmethod def _test_action_f(cls, barrier, results): barrier.wait() if len(results) != 1: raise RuntimeError def test_action(self): """ Test the 'action' callback """ results = self.DummyList() barrier = self.Barrier(self.N, action=AppendTrue(results)) self.run_threads(self._test_action_f, (barrier, results)) self.assertEqual(len(results), 1) @classmethod def _test_abort_f(cls, barrier, results1, results2): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() def test_abort(self): """ Test that an abort will put the barrier in a broken state """ results1 = self.DummyList() results2 = self.DummyList() self.run_threads(self._test_abort_f, (self.barrier, results1, results2)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertTrue(self.barrier.broken) @classmethod def _test_reset_f(cls, barrier, results1, results2, results3): i = barrier.wait() if i == 
cls.N//2: # Wait until the other threads are all in the barrier. while barrier.n_waiting < cls.N-1: time.sleep(0.001) barrier.reset() else: try: barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) # Now, pass the barrier again barrier.wait() results3.append(True) def test_reset(self): """ Test that a 'reset' on a barrier frees the waiting threads """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() self.run_threads(self._test_reset_f, (self.barrier, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_abort_and_reset_f(cls, barrier, barrier2, results1, results2, results3): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() # Synchronize and reset the barrier. Must synchronize first so # that everyone has left it when we reset, and after so that no # one enters it before the reset. if barrier2.wait() == cls.N//2: barrier.reset() barrier2.wait() barrier.wait() results3.append(True) def test_abort_and_reset(self): """ Test that a barrier can be reset after being broken. """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() barrier2 = self.Barrier(self.N) self.run_threads(self._test_abort_and_reset_f, (self.barrier, barrier2, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_timeout_f(cls, barrier, results): i = barrier.wait() if i == cls.N//2: # One thread is late! time.sleep(1.0) try: barrier.wait(0.5) except threading.BrokenBarrierError: results.append(True) def test_timeout(self): """ Test wait(timeout) """ results = self.DummyList() self.run_threads(self._test_timeout_f, (self.barrier, results)) self.assertEqual(len(results), self.barrier.parties) @classmethod def _test_default_timeout_f(cls, barrier, results): i = barrier.wait(cls.defaultTimeout) if i == cls.N//2: # One thread is later than the default timeout time.sleep(1.0) try: barrier.wait() except threading.BrokenBarrierError: results.append(True) def test_default_timeout(self): """ Test the barrier's default timeout """ barrier = self.Barrier(self.N, timeout=0.5) results = self.DummyList() self.run_threads(self._test_default_timeout_f, (barrier, results)) self.assertEqual(len(results), barrier.parties) def test_single_thread(self): b = self.Barrier(1) b.wait() b.wait() @classmethod def _test_thousand_f(cls, barrier, passes, conn, lock): for i in range(passes): barrier.wait() with lock: conn.send(i) def test_thousand(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) passes = 1000 lock = self.Lock() conn, child_conn = self.Pipe(False) for j in range(self.N): p = self.Process(target=self._test_thousand_f, args=(self.barrier, passes, child_conn, lock)) p.start() self.addCleanup(p.join) for i in range(passes): for j in range(self.N): self.assertEqual(conn.recv(), i) # # # class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('q', 2 ** 33, 2 ** 34), ('c', latin('x'), latin('y')) ] def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _test(cls, values): for sv, cv 
in zip(values, cls.codes_values): sv.value = cv[2] def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.daemon = True proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) def test_rawvalue(self): self.test_value(raw=True) def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() obj1 = val1.get_obj() val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() obj2 = val2.get_obj() lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() obj3 = val3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def f(cls, seq): for i in range(1, len(seq)): seq[i] += seq[i-1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.daemon = True p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_array_from_size(self): size = 10 # Test for zeroing (see issue #11675). # The repetition below strengthens the test by increasing the chances # of previously allocated non-zero memory being used for the new array # on the 2nd and 3rd loops. 
for _ in range(3): arr = self.Array('i', size) self.assertEqual(len(arr), size) self.assertEqual(list(arr), [0] * size) arr[:] = range(10) self.assertEqual(list(arr), list(range(10))) del arr @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', list(range(10))) lock1 = arr1.get_lock() obj1 = arr1.get_obj() arr2 = self.Array('i', list(range(10)), lock=None) lock2 = arr2.get_lock() obj2 = arr2.get_obj() lock = self.Lock() arr3 = self.Array('i', list(range(10)), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) # # # class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(list(range(10))) self.assertEqual(a[:], list(range(10))) b = self.list() self.assertEqual(b[:], []) b.extend(list(range(5))) self.assertEqual(b[:], list(range(5))) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2,3,4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], list(range(10))) d = [a, b] e = self.list(d) self.assertEqual( [element[:] for element in e], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) def test_list_iter(self): a = self.list(list(range(10))) it = iter(a) self.assertEqual(list(it), list(range(10))) self.assertEqual(list(it), []) # exhausted # list modified during iteration it = iter(a) a[0] = 100 self.assertEqual(next(it), 100) def test_list_proxy_in_list(self): a = self.list([self.list(range(3)) for _i in range(3)]) self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) a[0][-1] = 55 self.assertEqual(a[0][:], [0, 1, 55]) for i in range(1, 3): self.assertEqual(a[i][:], [0, 1, 2]) self.assertEqual(a[1].pop(), 2) self.assertEqual(len(a[1]), 2) for i in range(0, 3, 2): self.assertEqual(len(a[i]), 3) del a b = self.list() b.append(b) del b def test_dict(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) def test_dict_iter(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) it = iter(d) self.assertEqual(list(it), indices) self.assertEqual(list(it), []) # exhausted # dictionary changed size during iteration it = iter(d) d.clear() self.assertRaises(RuntimeError, next, it) def test_dict_proxy_nested(self): pets = self.dict(ferrets=2, hamsters=4) supplies = self.dict(water=10, feed=3) d = self.dict(pets=pets, supplies=supplies) self.assertEqual(supplies['water'], 10) self.assertEqual(d['supplies']['water'], 10) d['supplies']['blankets'] = 5 self.assertEqual(supplies['blankets'], 5) self.assertEqual(d['supplies']['blankets'], 5) d['supplies']['water'] = 7 self.assertEqual(supplies['water'], 7) 
self.assertEqual(d['supplies']['water'], 7) del pets del supplies self.assertEqual(d['pets']['ferrets'], 2) d['supplies']['blankets'] = 11 self.assertEqual(d['supplies']['blankets'], 11) pets = d['pets'] supplies = d['supplies'] supplies['water'] = 7 self.assertEqual(supplies['water'], 7) self.assertEqual(d['supplies']['water'], 7) d.clear() self.assertEqual(len(d), 0) self.assertEqual(supplies['water'], 7) self.assertEqual(pets['hamsters'], 4) l = self.list([pets, supplies]) l[0]['marmots'] = 1 self.assertEqual(pets['marmots'], 1) self.assertEqual(l[0]['marmots'], 1) del pets del supplies self.assertEqual(l[0]['marmots'], 1) outer = self.list([[88, 99], l]) self.assertIsInstance(outer[0], list) # Not a ListProxy self.assertEqual(outer[-1][-1]['feed'], 3) def test_nested_queue(self): a = self.list() # Test queue inside list a.append(self.Queue()) a[0].put(123) self.assertEqual(a[0].get(), 123) b = self.dict() # Test queue inside dict b[0] = self.Queue() b[0].put(456) self.assertEqual(b[0].get(), 456) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) # # # def sqr(x, wait=0.0): time.sleep(wait) return x*x def mul(x, y): return x*y def raise_large_valuerror(wait): time.sleep(wait) raise ValueError("x" * 1024**2) def identity(x): return x class CountedObject(object): n_instances = 0 def __new__(cls): cls.n_instances += 1 return object.__new__(cls) def __del__(self): type(self).n_instances -= 1 class SayWhenError(ValueError): pass def exception_throwing_generator(total, when): if when == -1: raise SayWhenError("Somebody said when") for i in range(total): if i == when: raise SayWhenError("Somebody said when") yield i class _TestPool(BaseTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.pool = cls.Pool(4) @classmethod def tearDownClass(cls): cls.pool.terminate() cls.pool.join() cls.pool = None super().tearDownClass() def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), list(map(sqr, list(range(100))))) def test_starmap(self): psmap = self.pool.starmap tuples = list(zip(range(10), range(9,-1, -1))) self.assertEqual(psmap(mul, tuples), list(itertools.starmap(mul, tuples))) tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(psmap(mul, tuples, chunksize=20), list(itertools.starmap(mul, tuples))) def test_starmap_async(self): tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(self.pool.starmap_async(mul, tuples).get(), list(itertools.starmap(mul, tuples))) def test_map_async(self): self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), list(map(sqr, list(range(10))))) def test_map_async_callbacks(self): call_args = self.manager.list() if self.TYPE == 'manager' else [] self.pool.map_async(int, ['1'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(1, len(call_args)) self.assertEqual([1], call_args[0]) self.pool.map_async(int, ['a'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(2, len(call_args)) self.assertIsInstance(call_args[1], ValueError) def 
test_map_unplicklable(self): # Issue #19425 -- failure to pickle should not cause a hang if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class A(object): def __reduce__(self): raise RuntimeError('cannot pickle') with self.assertRaises(RuntimeError): self.pool.map(sqr, [A()]*10) def test_map_chunksize(self): try: self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) except multiprocessing.TimeoutError: self.fail("pool.map_async with chunksize stalled on null list") def test_map_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) # again, make sure it's reentrant with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(10, 3), 1) class SpecialIterable: def __iter__(self): return self def __next__(self): raise SayWhenError def __len__(self): return 1 with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) get = TimingWrapper(res.get) self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, list(range(10))) self.assertEqual(list(it), list(map(sqr, list(range(10))))) it = self.pool.imap(sqr, list(range(10))) for i in range(10): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) it = self.pool.imap(sqr, list(range(1000)), chunksize=100) for i in range(1000): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) def test_imap_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) for i in range(3): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) # SayWhenError seen at start of problematic chunk's results it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) for i in range(6): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) for i in range(4): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, list(range(10))) self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) def test_imap_unordered_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for 
{}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap_unordered(sqr, exception_throwing_generator(10, 3), 1) expected_values = list(map(sqr, list(range(10)))) with self.assertRaises(SayWhenError): # imap_unordered makes it difficult to anticipate the SayWhenError for i in range(10): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) it = self.pool.imap_unordered(sqr, exception_throwing_generator(20, 7), 2) expected_values = list(map(sqr, list(range(20)))) with self.assertRaises(SayWhenError): for i in range(20): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) def test_make_pool(self): expected_error = (RemoteError if self.TYPE == 'manager' else ValueError) self.assertRaises(expected_error, self.Pool, -1) self.assertRaises(expected_error, self.Pool, 0) if self.TYPE != 'manager': p = self.Pool(3) try: self.assertEqual(3, len(p._pool)) finally: p.close() p.join() def test_terminate(self): result = self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() # Sanity check the pool didn't wait for all tasks to finish self.assertLess(join.elapsed, 2.0) def test_empty_iterable(self): # See Issue 12157 p = self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) p.close() p.join() def test_context(self): if self.TYPE == 'processes': L = list(range(10)) expected = [sqr(i) for i in L] with self.Pool(2) as p: r = p.map_async(sqr, L) self.assertEqual(r.get(), expected) p.join() self.assertRaises(ValueError, p.map_async, sqr, L) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_traceback(self): # We want ensure that the traceback from the child process is # contained in the traceback raised in the main process. 
if self.TYPE == 'processes': with self.Pool(1) as p: try: p.apply(self._test_traceback) except Exception as e: exc = e else: self.fail('expected RuntimeError') p.join() self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with test.support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) # _helper_reraises_exception should not make the error # a remote exception with self.Pool(1) as p: try: p.map(sqr, exception_throwing_generator(1, -1), 1) except Exception as e: exc = e else: self.fail('expected SayWhenError') self.assertIs(type(exc), SayWhenError) self.assertIs(exc.__cause__, None) p.join() @classmethod def _test_wrapped_exception(cls): raise RuntimeError('foo') @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_wrapped_exception(self): # Issue #20980: Should not wrap exception when using thread pool with self.Pool(1) as p: with self.assertRaises(RuntimeError): p.apply(self._test_wrapped_exception) p.join() def test_map_no_failfast(self): # Issue #23992: the fail-fast behaviour when an exception is raised # during map() would make Pool.join() deadlock, because a worker # process would fill the result queue (after the result handler thread # terminated, hence not draining it anymore). t_start = getattr(time,'monotonic',time.time)() with self.assertRaises(ValueError): with self.Pool(2) as p: try: p.map(raise_large_valuerror, [0, 1]) finally: time.sleep(0.5) p.close() p.join() # check that we indeed waited for all jobs self.assertGreater(getattr(time,'monotonic',time.time)() - t_start, 0.9) def test_release_task_refs(self): # Issue #29861: task arguments and results should not be kept # alive after we are done with them. objs = [CountedObject() for i in range(10)] refs = [weakref.ref(o) for o in objs] self.pool.map(identity, objs) del objs gc.collect() # For PyPy or other GCs. time.sleep(DELTA) # let threaded cleanup code run self.assertEqual(set(wr() for wr in refs), {None}) # With a process pool, copies of the objects are returned, check # they were released too. 
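        # (Descriptive note, not part of the vendored test: CountedObject,
        #  defined above, increments n_instances in __new__ and decrements it
        #  in __del__, so the assertion below verifies that the pool released
        #  its copies of the task arguments and results.)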
self.assertEqual(CountedObject.n_instances, 0) def test_enter(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) with pool: pass # call pool.terminate() # pool is no longer running with self.assertRaises(ValueError): # bpo-35477: pool.__enter__() fails if the pool is not running with pool: pass pool.join() def test_resource_warning(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) pool.terminate() pool.join() # force state to RUN to emit ResourceWarning in __del__() pool._state = multiprocessing.pool.RUN with support.check_warnings(('unclosed running multiprocessing pool', ResourceWarning)): pool = None support.gc_collect() def raising(): raise KeyError("key") def unpickleable_result(): return lambda: 42 class _TestPoolWorkerErrors(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_async_error_callback(self): p = multiprocessing.Pool(2) scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(raising, error_callback=errback) self.assertRaises(KeyError, res.get) self.assertTrue(scratchpad[0]) self.assertIsInstance(scratchpad[0], KeyError) p.close() p.join() def _test_unpickleable_result(self): from multiprocess.pool import MaybeEncodingError p = multiprocessing.Pool(2) # Make sure we don't lose pool processes because of encoding errors. for iteration in range(20): scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(unpickleable_result, error_callback=errback) self.assertRaises(MaybeEncodingError, res.get) wrapped = scratchpad[0] self.assertTrue(wrapped) self.assertIsInstance(scratchpad[0], MaybeEncodingError) self.assertIsNotNone(wrapped.exc) self.assertIsNotNone(wrapped.value) p.close() p.join() class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): p = multiprocessing.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive # (countdown * DELTA = 5 seconds max startup process time) countdown = 50 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [w.pid for w in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() def test_pool_worker_lifetime_early_close(self): # Issue #10332: closing a pool whose workers have limited lifetimes # before all the tasks completed would make join() hang. 
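        # (Illustrative reading of the scenario, stated as an assumption
        #  rather than taken verbatim from the test: with maxtasksperchild=1
        #  each worker exits after a single task, so close() followed by
        #  join() must still return once replacement workers have drained the
        #  remaining queued tasks instead of hanging.)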
p = multiprocessing.Pool(3, maxtasksperchild=1) results = [] for i in range(6): results.append(p.apply_async(sqr, (i, 0.3))) p.close() p.join() # check the results for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): # tests cases against bpo-38744 and bpo-39360 cmd = '''if 1: from multiprocess import Pool problem = None class A: def __init__(self): self.pool = Pool(processes=1) def test(): global problem problem = A() problem.pool.map(float, tuple(range(10))) if __name__ == "__main__": test() ''' rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) self.assertEqual(rc, 0) # # Test of creating a customized manager class # from multiprocess.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in range(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('__next__',) def __iter__(self): return self def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() self.common(manager) manager.shutdown() # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context(self): with MyManager() as manager: self.common(manager) # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. 
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context_prestarted(self): manager = MyManager() manager.start() with manager: self.common(manager) self.assertEqual(manager._process.exitcode, 0) def common(self, manager): foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = pyqueue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) values = ['hello world', None, True, 2.25, 'hall\xe5 v\xe4rlden', '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', b'hall\xe5 v\xe4rlden', ] result = values[:] @classmethod def _putter(cls, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple queue.put(tuple(cls.values)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER ) manager.start() self.addCleanup(manager.shutdown) p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() self.assertEqual(queue.get(), self.result) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue @hashlib_helper.requires_hashdigest('md5') class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER) try: srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. 
srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() p.join() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue finally: if hasattr(manager, "shutdown"): manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) try: manager.start() self.addCleanup(manager.shutdown) except OSError as e: if e.errno != errno.EADDRINUSE: raise # Retry after some time, in case the old socket was lingering # (sporadic failure on buildbots) time.sleep(1.0) manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) if hasattr(manager, "shutdown"): self.addCleanup(manager.shutdown) # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', list(range(4))) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocessing.BufferTooShort as e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(-1), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) time.sleep(.1) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(OSError, reader.send, 2) self.assertRaises(OSError, writer.recv) self.assertRaises(OSError, writer.poll) def test_spawn_close(self): # We 
test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) reduction.send_handle(conn, fd, p.pid) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") 
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. 
conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @hashlib_helper.requires_hashdigest('md5') class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=support.LONG_TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((socket_helper.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = 
lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). 
# Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") @hashlib_helper.requires_hashdigest('md5') class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def _new_shm_name(self, prefix): # Add a PID to the name of a POSIX shared memory object to allow # running multiprocessing tests (test_multiprocessing_fork, # test_multiprocessing_spawn, etc) in parallel. return prefix + str(os.getpid()) @unittest.skipIf(sys.platform == "win32", "test is broken on Windows") @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_shared_memory_basics(self): name_tsmb = self._new_shm_name('test01_tsmb') sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, name_tsmb) self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory(name_tsmb) self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. 
same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. same_sms.close() # Creating Shared Memory Segment with -ve size with self.assertRaises(ValueError): shared_memory.SharedMemory(create=True, size=-2) # Attaching Shared Memory Segment without a name with self.assertRaises(ValueError): shared_memory.SharedMemory(create=False) # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocessing.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. name_dblunlink = self._new_shm_name('test01_dblunlink') sms_uno = shared_memory.SharedMemory( name_dblunlink, create=True, size=5000 ) with self.assertRaises(FileNotFoundError): try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory(name_dblunlink) sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( name_tsmb, create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. 
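# Editor's note: illustrative sketch, not part of the original test suite.
# It shows the create / attach-by-name / unlink pattern that the SharedMemory
# tests above rely on, using only the documented shared_memory API; the
# helper name below is made up.
def _shared_memory_sketch():
    from multiprocess import shared_memory

    creator = shared_memory.SharedMemory(create=True, size=16)
    creator.buf[0] = 42

    # A second handle attaches to the same block by name and sees the write.
    attached = shared_memory.SharedMemory(creator.name)
    assert attached.buf[0] == 42

    attached.close()
    creator.close()
    creator.unlink()   # unlink exactly once, from the creating side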
sms.close() # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocessing tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' #XXX: ensure correct resource_tracker rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. 
self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. name_duplicate = self._new_shm_name('test03_duplicate') sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual(name_duplicate, sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. 
sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl) deserialized_sl = pickle.loads(serialized_sl) self.assertTrue( isinstance(deserialized_sl, shared_memory.ShareableList) ) self.assertTrue(deserialized_sl[-1], 9) self.assertFalse(sl is deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") # Verify data is not being put into the pickled representation. name = 'a' * len(sl.shm.name) larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl) self.assertTrue(len(serialized_sl) == len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. p.terminate() p.wait() deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT t = 0.1 while getattr(time,'monotonic',time.time)() < deadline: time.sleep(t) t = min(t*2, 5) try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: raise AssertionError("A SharedMemory segment was leaked after" " a process was abruptly terminated.") if os.name == 'posix': # Without this line it was raising warnings like: # UserWarning: resource_tracker: # There appear to be 1 leaked shared_memory # objects to clean up at shutdown # See: https://bugs.python.org/issue45209 resource_tracker.unregister(f"/{name}", "shared_memory") # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): gc.collect() # For PyPy or other GCs. self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a gc.collect() # For PyPy or other GCs. 
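# Editor's note: illustrative sketch, not part of the original test suite.
# util.Finalize registers a callback that runs when the tracked object is
# garbage collected, when the finalizer is called directly, or at process
# exit if an exitpriority is given (higher priorities run first), which is
# the ordering the _TestFinalize cases above assert.
def _finalize_sketch():
    import gc
    from multiprocess import util

    class Token(object):
        pass

    events = []
    t = Token()
    util.Finalize(t, events.append, args=('collected',))
    del t           # the callback fires once the object is collected
    gc.collect()    # help non-refcounting GCs along
    assert events == ['collected']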
b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called gc.collect() # For PyPy or other GCs. c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with test.support.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' 
+ m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): pass finally: # Hack private attribute _handle to avoid printing an error # in conn.__del__ conn._handle = None 
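# Editor's note: illustrative sketch, not part of the original test suite.
# The deliver_challenge()/answer_challenge() failure cases tested below are
# the low-level half of the authkey handshake; in normal use the same check
# happens implicitly when a Listener and a Client share an authkey.
def _authkey_sketch():
    import threading
    from multiprocess.connection import Listener, Client

    def serve(listener):
        with listener.accept() as conn:   # accept() performs the handshake
            conn.send('authenticated hello')

    with Listener(authkey=b'secret') as listener:
        t = threading.Thread(target=serve, args=(listener,))
        t.start()
        with Client(listener.address, authkey=b'secret') as conn:
            assert conn.recv() == 'authenticated hello'
        t.join()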
self.assertRaises((ValueError, OSError), multiprocessing.connection.Connection, -1) @hashlib_helper.requires_hashdigest('md5') class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection.CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') # # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 @hashlib_helper.requires_hashdigest('md5') class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() self.mgr.join() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() m.join() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocessing.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random()*0.1) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random()*0.1) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.create_server((socket_helper.HOST, 0)) addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) def test_wait_timeout(self): from multiprocess.connection import wait expected = 5 a, b = multiprocessing.Pipe() start = getattr(time,'monotonic',time.time)() res = wait([a, b], expected) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(res, []) self.assertLess(delta, expected * 2) self.assertGreater(delta, expected * 0.5) b.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, b], 20) delta = 
getattr(time,'monotonic',time.time)() - start self.assertEqual(res, [a]) self.assertLess(delta, 0.4) @classmethod def signal_and_sleep(cls, sem, period): sem.release() time.sleep(period) def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocessing.Semaphore(0) a, b = multiprocessing.Pipe() p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) self.assertTrue(sem.acquire(timeout=20)) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], expected + 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocessing.Pipe() t = getattr(time,'monotonic',time.time)() res = wait([a], timeout=-1) t = getattr(time,'monotonic',time.time)() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls): import json r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json # start child process using unusual flags prog = ('from multiprocess.tests import TestFlags; ' + 'TestFlags.run_in_child()') data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() 
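# Editor's note: illustrative sketch, not part of the original test suite.
# connection.wait(object_list, timeout=None), exercised by the TestWait cases
# above, blocks until at least one connection, socket, or process sentinel is
# ready, and returns an empty list when the timeout expires first.
def _wait_sketch():
    from multiprocess import Pipe
    from multiprocess.connection import wait

    a, b = Pipe()
    assert wait([a], timeout=0.1) == []   # nothing readable yet
    b.send('ping')
    assert wait([a], timeout=5) == [a]    # a becomes readable
    assert a.recv() == 'ping'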
self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. @classmethod def child(cls, n, conn): if n > 1: p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() join_process(p) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocessing.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocessing.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() join_process(p) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. 
fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocessing.Pipe() fd = self.get_high_socket_fd() try: p = multiprocessing.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() join_process(p) finally: self.close(fd) writer.close() reader.close() if multiprocessing.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, ctx.get_start_method()) def test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocessing.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def test_set_get(self): multiprocessing.set_forkserver_preload(PRELOAD) count 
= 0 old_method = multiprocessing.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocessing.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocessing.get_start_method(), method) ctx = multiprocessing.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocessing) count += 1 finally: multiprocessing.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocessing.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['spawn', 'fork'] or methods == ['fork', 'spawn', 'forkserver'] or methods == ['spawn', 'fork', 'forkserver']) def test_preload_resources(self): if multiprocessing.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestResourceTracker(unittest.TestCase): def _test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os, tempfile import multiprocess as mp from multiprocess import resource_tracker from multiprocess.shared_memory import SharedMemory mp.set_start_method("spawn") rand = tempfile._RandomNameSequence() def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT while getattr(time,'monotonic',time.time)() < deadline: time.sleep(.5) try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, but OSX seems to give # EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break else: raise AssertionError( f"A {rtype} resource was leaked after a process was " f"abruptly terminated.") err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) def check_resource_tracker_death(self, signum, 
should_die): # bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) support.wait_process(pid, exitcode=-signal.SIGKILL) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() def test_close(self): queue = multiprocessing.SimpleQueue() queue.close() # closing a queue twice should not fail queue.close() # Test specific to CPython since it tests private attributes @test.support.cpython_only def test_closed(self): queue = multiprocessing.SimpleQueue() queue.close() self.assertTrue(queue._reader.closed) self.assertTrue(queue._writer.closed) class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: 
During pool creation, if we can't create a process, # don't leak already created ones. will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) @hashlib_helper.requires_hashdigest('md5') class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert o[1] == 6 """ manager_class = multiprocessing.managers.SyncManager def setUp(self): self.manager = self.manager_class() self.manager.start() self.proc = None def tearDown(self): if self.proc is not None and self.proc.is_alive(): self.proc.terminate() self.proc.join() self.manager.shutdown() self.manager = None self.proc = None @classmethod def setUpClass(cls): support.reap_children() tearDownClass = setUpClass def wait_proc_exit(self): # Only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395). 
join_process(self.proc) start_time = getattr(time,'monotonic',time.time)() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = getattr(time,'monotonic',time.time)() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) self.run_worker(self._test_pool, o) @classmethod def _test_queue(cls, obj): assert obj.qsize() == 2 assert obj.full() assert not obj.empty() assert obj.get() == 5 assert not obj.empty() assert obj.get() == 6 assert obj.empty() def test_queue(self, qname="Queue"): o = getattr(self.manager, qname)(2) o.put(5) o.put(6) self.run_worker(self._test_queue, o) assert o.empty() assert not o.full() def test_joinable_queue(self): self.test_queue("JoinableQueue") @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.count(5) == 1 assert obj.index(5) == 0 obj.sort() obj.reverse() for x in obj: pass assert len(obj) == 1 assert obj.pop(0) == 5 def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_dict(cls, obj): assert len(obj) == 1 assert obj['foo'] == 5 assert obj.get('foo') == 5 assert list(obj.items()) == [('foo', 5)] assert list(obj.keys()) == ['foo'] assert list(obj.values()) == [5] assert obj.copy() == {'foo': 5} assert obj.popitem() == ('foo', 5) def test_dict(self): o = self.manager.dict() o['foo'] = 5 self.run_worker(self._test_dict, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_value(cls, obj): assert obj.value == 1 assert obj.get() == 1 obj.set(2) def test_value(self): o = self.manager.Value('i', 1) self.run_worker(self._test_value, o) self.assertEqual(o.value, 2) self.assertEqual(o.get(), 2) @classmethod def _test_array(cls, obj): assert obj[0] == 0 assert obj[1] == 1 assert 
len(obj) == 2 assert list(obj) == [0, 1] def test_array(self): o = self.manager.Array('i', [0, 1]) self.run_worker(self._test_array, o) @classmethod def _test_namespace(cls, obj): assert obj.x == 0 assert obj.y == 1 def test_namespace(self): o = self.manager.Namespace() o.x = 0 o.y = 1 self.run_worker(self._test_namespace, o) class MiscTestCase(unittest.TestCase): def test__all__(self): # Just make sure names in blacklist are excluded support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, blacklist=['SUBDEBUG', 'SUBWARNING', 'license', 'citation']) # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocessing.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) if processes: test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocessing.Process connection = multiprocessing.connection current_process = staticmethod(multiprocessing.current_process) parent_process = staticmethod(multiprocessing.parent_process) active_children = staticmethod(multiprocessing.active_children) Pool = staticmethod(multiprocessing.Pool) Pipe = staticmethod(multiprocessing.Pipe) Queue = staticmethod(multiprocessing.Queue) JoinableQueue = staticmethod(multiprocessing.JoinableQueue) Lock = staticmethod(multiprocessing.Lock) RLock = staticmethod(multiprocessing.RLock) Semaphore = staticmethod(multiprocessing.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) Condition = staticmethod(multiprocessing.Condition) Event = staticmethod(multiprocessing.Event) Barrier = staticmethod(multiprocessing.Barrier) Value = staticmethod(multiprocessing.Value) Array = staticmethod(multiprocessing.Array) RawValue = staticmethod(multiprocessing.RawValue) RawArray = staticmethod(multiprocessing.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocessing.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocessing.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this 
can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) start_time = getattr(time,'monotonic',time.time)() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = getattr(time,'monotonic',time.time)() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. test.support.environment_altered = True support.print_warning('Shared objects which still exist ' 'at manager shutdown:') support.print_warning(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocessing.dummy.Process connection = multiprocessing.dummy.connection current_process = staticmethod(multiprocessing.dummy.current_process) active_children = staticmethod(multiprocessing.dummy.active_children) Pool = staticmethod(multiprocessing.dummy.Pool) Pipe = staticmethod(multiprocessing.dummy.Pipe) Queue = staticmethod(multiprocessing.dummy.Queue) JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) Lock = staticmethod(multiprocessing.dummy.Lock) RLock = staticmethod(multiprocessing.dummy.RLock) Semaphore = staticmethod(multiprocessing.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) Condition = staticmethod(multiprocessing.dummy.Condition) Event = staticmethod(multiprocessing.dummy.Event) Barrier = staticmethod(multiprocessing.dummy.Barrier) Value = staticmethod(multiprocessing.dummy.Value) Array = staticmethod(multiprocessing.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass if type_ == 'manager': Temp = hashlib_helper.requires_hashdigest('md5')(Temp) Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = __module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocessing.set_forkserver_preload(PRELOAD) multiprocessing.process._cleanup() dangling[0] = multiprocessing.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " 
"see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates temp directory multiprocessing.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = False # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() multiprocessing.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocessing.process._dangling) - set(dangling[0]) if processes: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocessing.util._cleanup_tests() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/tests/__main__.py000066400000000000000000000016201455552142400261660ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') tests = glob.glob(suite + os.path.sep + '__init__.py') + \ [i for i in tests if 'main' not in i] if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/tests/mp_fork_bomb.py000066400000000000000000000007001455552142400271000ustar00rootroot00000000000000import multiprocessing, sys def foo(): print("123") # Because "if __name__ == '__main__'" is missing this will not work # correctly on Windows. However, we should get a RuntimeError rather # than the Windows equivalent of a fork bomb. 
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) else: multiprocessing.set_start_method('spawn') p = multiprocessing.Process(target=foo) p.start() p.join() sys.exit(p.exitcode) uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/tests/mp_preload.py000066400000000000000000000005551455552142400265760ustar00rootroot00000000000000import multiprocessing multiprocessing.Lock() def f(): print("ok") if __name__ == "__main__": ctx = multiprocessing.get_context("forkserver") modname = "multiprocess.tests.mp_preload" # Make sure it's importable __import__(modname) ctx.set_forkserver_preload([modname]) proc = ctx.Process(target=f) proc.start() proc.join() uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/tests/test_multiprocessing_fork.py000066400000000000000000000007341455552142400317620ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("fork is not available on Windows") if sys.platform == 'darwin': raise unittest.SkipTest("test may crash on macOS (bpo-33725)") install_tests_in_module_dict(globals(), 'fork') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/tests/test_multiprocessing_forkserver.py000066400000000000000000000006071455552142400332100ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("forkserver is not available on Windows") install_tests_in_module_dict(globals(), 'forkserver') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/tests/test_multiprocessing_main_handling.py000066400000000000000000000271601455552142400336130ustar00rootroot00000000000000# tests __main__ module handling in multiprocessing from test import support # Skip tests if _multiprocessing wasn't built. support.import_module('_multiprocessing') import importlib import importlib.machinery import unittest import sys import os import os.path import py_compile from test.support.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, assert_python_ok) if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") # Look up which start methods are available to test import multiprocess as multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) # Issue #22332: Skip tests if sem_open implementation is broken. support.import_module('multiprocess.synchronize') verbose = support.verbose test_source = """\ # multiprocessing includes all sorts of shenanigans to make __main__ # attributes accessible in the subprocess in a pickle compatible way. 
# We run the "doesn't work in the interactive interpreter" example from # the docs to make sure it *does* work from an executed __main__, # regardless of the invocation mechanism import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method # We use this __main__ defined function in the map call below in order to # check that multiprocessing in correctly running the unguarded # code in child processes and then making it available as __main__ def f(x): return x*x # Check explicit relative imports if "check_sibling" in __file__: # We're inside a package and not in a __main__.py file # so make sure explicit relative imports work correctly from . import sibling if __name__ == '__main__': start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(f, [1, 2, 3], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) test_source_main_skipped_in_children = """\ # __main__.py files have an implied "if __name__ == '__main__'" so # multiprocessing should always skip running them in child processes # This means we can't use __main__ defined functions in child processes, # so we just use "int" as a passthrough operation below if __name__ != "__main__": raise RuntimeError("Should only be called as __main__!") import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(int, [1, 4, 9], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) # These helpers were copied from test_cmd_line_script & tweaked a bit... def _make_test_script(script_dir, script_basename, source=test_source, omit_suffix=False): to_return = make_script(script_dir, script_basename, source, omit_suffix) # Hack to check explicit relative imports if script_basename == "check_sibling": make_script(script_dir, "sibling", "") importlib.invalidate_caches() return to_return def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source=test_source, depth=1): to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth) importlib.invalidate_caches() return to_return # There's no easy way to pass the script directory in to get # -m to work (avoiding that is the whole point of making # directories and zipfiles executable!) 
# So we fake it for testing purposes with a custom launch script launch_source = """\ import sys, os.path, runpy sys.path.insert(0, %s) runpy._run_module_as_main(%r) """ def _make_launch_script(script_dir, script_basename, module_name, path=None): if path is None: path = "os.path.dirname(__file__)" else: path = repr(path) source = launch_source % (path, module_name) to_return = make_script(script_dir, script_basename, source) importlib.invalidate_caches() return to_return class MultiProcessingCmdLineMixin(): maxDiff = None # Show full tracebacks on subprocess failure def setUp(self): if self.start_method not in AVAILABLE_START_METHODS: self.skipTest("%r start method not available" % self.start_method) def _check_output(self, script_name, exit_code, out, err): if verbose > 1: print("Output from test script %r:" % script_name) print(repr(out)) self.assertEqual(exit_code, 0) self.assertEqual(err.decode('utf-8'), '') expected_results = "%s -> [1, 4, 9]" % self.start_method self.assertEqual(out.decode('utf-8').strip(), expected_results) def _check_script(self, script_name, *cmd_line_switches): if not __debug__: cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) run_args = cmd_line_switches + (script_name, self.start_method) rc, out, err = assert_python_ok(*run_args, __isolated=False) self._check_output(script_name, rc, out, err) def test_basic_script(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') self._check_script(script_name) def test_basic_script_no_suffix(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script', omit_suffix=True) self._check_script(script_name) def test_ipython_workaround(self): # Some versions of the IPython launch script are missing the # __name__ = "__main__" guard, and multiprocessing has long had # a workaround for that case # See https://github.com/ipython/ipython/issues/4698 source = test_source_main_skipped_in_children with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'ipython', source=source) self._check_script(script_name) script_no_suffix = _make_test_script(script_dir, 'ipython', source=source, omit_suffix=True) self._check_script(script_no_suffix) def test_script_compiled(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) self._check_script(pyc_file) def test_directory(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) self._check_script(script_dir) def test_directory_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) self._check_script(script_dir) def test_zipfile(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) self._check_script(zip_name) def test_zipfile_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) compiled_name = 
py_compile.compile(script_name, doraise=True) zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) self._check_script(zip_name) def test_module_in_package(self): with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, 'check_sibling') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.check_sibling') self._check_script(launch_name) def test_module_in_package_in_zipfile(self): with support.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) self._check_script(launch_name) def test_module_in_subpackage_in_zipfile(self): with support.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) self._check_script(launch_name) def test_package(self): source = self.main_in_children_source with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) def test_package_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) compiled_name = py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) # Test all supported start methods (setupClass skips as appropriate) class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'spawn' main_in_children_source = test_source_main_skipped_in_children class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'fork' main_in_children_source = test_source class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'forkserver' main_in_children_source = test_source_main_skipped_in_children def tearDownModule(): support.reap_children() if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/tests/test_multiprocessing_spawn.py000066400000000000000000000004241455552142400321450ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") install_tests_in_module_dict(globals(), 'spawn') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/py3.9/multiprocess/util.py000066400000000000000000000332521455552142400242670ustar00rootroot00000000000000# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import itertools import sys import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocess' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocess ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # Abstract socket support def _platform_supports_abstract_sockets(): if sys.platform == "linux": return True if hasattr(sys, 'getandroidapilevel'): return True return False def is_abstract_socket_namespace(address): if not address: return False if isinstance(address, bytes): return address[0] == 0 elif isinstance(address, str): return address[0] == "\0" raise TypeError(f'address type of {address!r} unrecognized') abstract_sockets_supported = _platform_supports_abstract_sockets() # # Function returning a temp directory which will be removed on exit # def _remove_temp_dir(rmtree, tempdir): rmtree(tempdir) current_process = process.current_process() # current_process() can be None if the finalizer is called # late during Python finalization if current_process is not None: current_process._config['tempdir'] = None def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up tempdir = process.current_process()._config.get('tempdir') if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) # keep a strong reference to shutil.rmtree(), since the finalizer # can be called late during Python shutdown Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), exitpriority=-100) process.current_process()._config['tempdir'] = tempdir return tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} 
_finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): if (exitpriority is not None) and not isinstance(exitpriority,int): raise TypeError( "Exitpriority ({0!r}) must be None or int, not {1!s}".format( exitpriority, type(exitpriority))) if obj is not None: self._weakref = weakref.ref(obj, self) elif exitpriority is None: raise ValueError("Without object, exitpriority cannot be None") self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<%s object, dead>' % self.__class__.__name__ x = '<%s object, callback=%s' % ( self.__class__.__name__, getattr(self._callback, '__name__', self._callback)) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitpriority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0] is not None else: f = lambda p : p[0] is not None and p[0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). # list(_finalizer_registry) should be atomic, while # list(_finalizer_registry.items()) is not. 
keys = [key for key in list(_finalizer_registry) if f(key)] keys.sort(reverse=True) for key in keys: finalizer = _finalizer_registry.get(key) # key may have been removed from the registry if finalizer is not None: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=process.active_children, current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) def _at_fork_reinit(self): self._lock._at_fork_reinit() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () # # Close fds except those specified # try: MAXFD = os.sysconf("SC_OPEN_MAX") except Exception: MAXFD = 256 def close_all_fds_except(fds): fds = list(fds) + [-1, MAXFD] fds.sort() assert fds[-1] == MAXFD, 'fd too large' for i in range(len(fds) - 1): os.closerange(fds[i]+1, fds[i+1]) # # Close sys.stdin and replace stdin with os.devnull # def _close_stdin(): if sys.stdin is None: return try: sys.stdin.close() except (OSError, ValueError): pass try: fd = os.open(os.devnull, os.O_RDONLY) try: sys.stdin = open(fd, closefd=False) except: os.close(fd) raise except (OSError, ValueError): pass # # Flush standard streams, if any # def _flush_std_streams(): try: sys.stdout.flush() except (AttributeError, ValueError): pass try: sys.stderr.flush() except (AttributeError, ValueError): pass # # Start a program with only specified fds kept open # def spawnv_passfds(path, args, passfds): import _posixsubprocess passfds = tuple(sorted(map(int, passfds))) errpipe_read, errpipe_write = os.pipe() try: return 
_posixsubprocess.fork_exec( args, [os.fsencode(path)], True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, None, None, None, -1, None) finally: os.close(errpipe_read) os.close(errpipe_write) def close_fds(*fds): """Close each file descriptor given as an argument""" for fd in fds: os.close(fd) def _cleanup_tests(): """Cleanup multiprocessing resources when multiprocessing tests completed.""" from test import support # cleanup multiprocessing process._cleanup() # Stop the ForkServer process if it's running from multiprocess import forkserver forkserver._forkserver._stop() # Stop the ResourceTracker process if it's running from multiprocess import resource_tracker resource_tracker._resource_tracker._stop() # bpo-37421: Explicitly call _run_finalizers() to remove immediately # temporary directories created by multiprocessing.util.get_temp_dir(). _run_finalizers() support.gc_collect() support.reap_children() uqfoundation-multiprocess-b3457a5/pyproject.toml000066400000000000000000000002461455552142400222360ustar00rootroot00000000000000[build-system] # Further build requirements come from setup.py via the PEP 517 interface requires = [ "setuptools>=42", ] build-backend = "setuptools.build_meta" uqfoundation-multiprocess-b3457a5/pypy3.10/000077500000000000000000000000001455552142400206235ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/README_MODS000066400000000000000000000023731455552142400223320ustar00rootroot00000000000000cp -rf pypy3.9/examples . cp -rf pypy3.9/doc . cp -f pypy3.9/index.html . cp -rf pypy3.9/module . cp -rf py3.10/multiprocess . cp -rf pypy3.9/_multiprocess . # ---------------------------------------------------------------------- $ diff Python-3.10.9/Lib/test/_test_multiprocessing.py pypy3.10-v7.3.12-src/lib-python/3/test/_test_multiprocessing.py 621c621,622 < gc.collect() # For PyPy or other GCs. --- > for i in range(3): > gc.collect() 2678c2679,2680 < gc.collect() # For PyPy or other GCs. --- > for i in range(3): > gc.collect() 2766a2769,2771 > sm = multiprocessing.get_start_method() > if sm == 'fork' and sys.implementation.name == 'pypy': > self.skipTest("race condition on PyPy") 2980c2985,2986 < self.assertRaises(Exception, queue.put, time.sleep) --- > # Changed on PyPy: passing functions to xmlrpc is broken > #self.assertRaises(Exception, queue.put, time.sleep) 3677a3684,3685 > support.gc_collect() # for PyPy and other GCs > 3683a3692 > @test.support.cpython_only 4302c4311,4312 < gc.collect() # For PyPy or other GCs. --- > for i in range(3): > gc.collect() 4356a4367 > @test.support.cpython_only uqfoundation-multiprocess-b3457a5/pypy3.10/_multiprocess/000077500000000000000000000000001455552142400235135ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/_multiprocess/__init__.py000066400000000000000000000005011455552142400256200ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE from _multiprocessing import * uqfoundation-multiprocess-b3457a5/pypy3.10/doc/000077500000000000000000000000001455552142400213705ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/doc/CHANGES.html000066400000000000000000001133431455552142400233330ustar00rootroot00000000000000 Changelog for processing

Changelog for processing

Changes in 0.52

  • On versions 0.50 and 0.51 Mac OSX Lock.release() would fail with OSError(errno.ENOSYS, "[Errno 78] Function not implemented"). This appears to be because on Mac OSX sem_getvalue() has not been implemented.

    Now sem_getvalue() is no longer needed. Unfortunately, however, on Mac OSX BoundedSemaphore() will not raise ValueError if it exceeds its initial value.

  • Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs.

  • The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias.

Changes in 0.51

  • In 0.50 processing.Value() and processing.sharedctypes.Value() were related but had different signatures, which was rather confusing.

    Now processing.sharedctypes.Value() has been renamed processing.sharedctypes.RawValue() and processing.sharedctypes.Value() is the same as processing.Value().

  • In version 0.50 sendfd() and recvfd() apparently did not work on 64-bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40.

    However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of sendfd() and recvfd() by setting macros['HAVE_FD_TRANSFER'] = 0 in setup.py.

  • Fixed an authentication error when using a "remote" manager created using BaseManager.from_address().

  • Fixed a couple of bugs which only affected Python 2.4.

Changes in 0.50

  • ctypes is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately.

  • LocalManager() has been removed.

  • Added processing.Value() and processing.Array() which are similar to LocalManager.SharedValue() and LocalManager.SharedArray().
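
    A minimal sketch of how these factories can be combined with Process (the 'i'/'d' typecodes and the target=/args= keyword spellings follow the later multiprocessing API and are assumed to match here):

    from processing import Process, Value, Array

    def work(counter, samples):
        counter.value += 1                 # shared integer
        for i in range(len(samples)):
            samples[i] = samples[i] * 2    # shared double array

    if __name__ == '__main__':
        counter = Value('i', 0)
        samples = Array('d', [1.0, 2.0, 3.0])
        p = Process(target=work, args=(counter, samples))
        p.start()
        p.join()
        assert counter.value == 1 and samples[0] == 2.0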

  • In the sharedctypes module new_value() and new_array() have been renamed Value() and Array().

  • Process.stop(), Process.getStoppable() and Process.setStoppable() have been removed. Use Process.terminate() instead.

  • processing.Lock now matches threading.Lock behaviour more closely: a thread can now release a lock it does not own, and when a thread tries to acquire a lock it already owns, a deadlock now results instead of an exception.

  • On Windows when the main thread is blocking on a method of Lock, RLock, Semaphore, BoundedSemaphore, Condition it will no longer ignore Ctrl-C. (The same was already true on Unix.)

    This differs from the behaviour of the equivalent objects in threading which will completely ignore Ctrl-C.

  • The test sub-package has been replaced by lots of unit tests in a tests sub-package. Some of the old test files have been moved over to a new examples sub-package.

  • On Windows it is now possible for a non-console python program (i.e. one using pythonw.exe instead of python.exe) to use processing.

    Previously an exception was raised when subprocess.py tried to duplicate stdin, stdout, stderr.

  • Proxy objects should now be thread safe -- they now use thread local storage.

  • Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise RuntimeError with a message saying that the object should only be shared between processes using inheritance.

    Previously, this worked unreliably on Windows but would fail with an unexplained AssertionError on Unix.
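
    The distinction, roughly (a hedged sketch; the RuntimeError shown in the comment is paraphrased, not the exact message):

    from processing import Process, Lock, Queue

    def worker(lock, results):
        lock.acquire()
        try:
            results.put('done')
        finally:
            lock.release()

    if __name__ == '__main__':
        lock = Lock()
        results = Queue()
        # Sharing by inheritance: the lock and queue are passed as constructor
        # arguments, so the child inherits them -- this is the supported pattern.
        p = Process(target=worker, args=(lock, results))
        p.start()
        p.join()
        # Sending a shared resource *through* a queue or pipe is not supported
        # and now raises RuntimeError:
        #     results.put(lock)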

  • The names of some of the macros used for compiling the extension have changed. See INSTALL.txt and setup.py.

  • A few changes which (hopefully) make compilation possible on Solaris.

  • Lots of refactoring of the code.

  • Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux).

Changes in 0.40

  • Removed SimpleQueue and PosixQueue types. Just use Queue instead.

  • Previously if you forgot to use the

    if __name__ == '__main__':
        freezeSupport()
        ...
    

    idiom on Windows then processes could be created recursively, bringing the computer to its knees. Now RuntimeError will be raised instead.

  • Some refactoring of the code.

  • A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed.

Changes in 0.39

  • One can now create one-way pipes by doing reader, writer = Pipe(duplex=False).
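
    For example (a minimal sketch built on the call shown above; send()/recv() are the connection methods assumed here):

    from processing import Process, Pipe

    def child(writer):
        writer.send('hello')   # the writer end can only send
        writer.close()

    if __name__ == '__main__':
        reader, writer = Pipe(duplex=False)
        p = Process(target=child, args=(writer,))
        p.start()
        assert reader.recv() == 'hello'   # the reader end can only receive
        p.join()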

  • Rewrote code for managing shared memory maps.

  • Added a sharedctypes module for creating ctypes objects allocated from shared memory. On Python 2.4 this requires the installation of ctypes.

    ctypes objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a LocalManager.
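
    A hedged sketch of guarding such an object with an explicit lock (the factory is called new_value() in this release and Value()/RawValue() in later ones, as noted elsewhere in this changelog; the 'i' typecode argument is an assumption):

    from processing import Process, Lock
    from processing.sharedctypes import new_value   # Value()/RawValue() in 0.50+

    def bump(counter, lock):
        for _ in range(1000):
            lock.acquire()
            try:
                counter.value += 1    # plain ctypes access, so we lock around it
            finally:
                lock.release()

    if __name__ == '__main__':
        counter = new_value('i', 0)
        lock = Lock()
        workers = [Process(target=bump, args=(counter, lock)) for _ in range(4)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
        assert counter.value == 4000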

  • Rearranged documentation.

  • Previously the C extension caused a segfault on 64-bit machines with Python 2.5 because it used int instead of Py_ssize_t in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report.

  • A fix for Pool.terminate().

  • A fix for cleanup behaviour of Queue.

Changes in 0.38

  • Have revamped the queue types. Now the queue types are Queue, SimpleQueue and (on systems which support it) PosixQueue.

    Now Queue should behave just like Python's normal Queue.Queue class except that qsize(), task_done() and join() are not implemented. In particular, if no maximum size was specified when the queue was created then put() will always succeed without blocking.

    A SimpleQueue instance is really just a pipe protected by a couple of locks. It has get(), put() and empty() methods but does not support timeouts or non-blocking calls.

    BufferedPipeQueue() and PipeQueue() remain as deprecated aliases of Queue() but BufferedPosixQueue() has been removed. (Not sure if we really need to keep PosixQueue()...)
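
    A short sketch of the difference (the timeout= keyword is assumed to follow Queue.Queue, and SimpleQueue is assumed to be importable from the top-level package):

    from processing import Queue, SimpleQueue

    q = Queue()               # no maximum size given, so put() never blocks
    q.put('task')
    item = q.get(timeout=1)   # blocking get with a timeout, like Queue.Queue

    sq = SimpleQueue()        # just a pipe plus a couple of locks
    sq.put('task')
    if not sq.empty():        # no timeouts or non-blocking variants here
        item = sq.get()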

  • Previously the Pool.shutdown() method was a little dodgy -- it could block indefinitely if map() or imap*() were used and did not try to terminate workers while they were doing a task.

    Now there are three new methods close(), terminate() and join() -- shutdown() is retained as a deprecated alias of terminate(). Thanks to Gerald John M. Manipon for feature request/suggested patch to shutdown().
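
    A sketch of the intended shutdown sequence (the worker function and pool size are arbitrary illustrations):

    from processing import Pool

    def square(x):
        return x * x

    if __name__ == '__main__':
        pool = Pool(4)
        try:
            results = pool.map(square, range(10))
        finally:
            pool.close()   # no further tasks will be accepted
            pool.join()    # wait for the workers to exit
        # pool.terminate() would instead stop the workers immediately,
        # even mid-task; shutdown() survives only as a deprecated alias for it.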

  • Pool.imap() and Pool.imap_unordered() have gained a chunksize argument which allows the iterable to be submitted to the pool in chunks. Choosing chunksize appropriately makes Pool.imap() almost as fast as Pool.map() even for long iterables and cheap functions.
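
    For instance (a sketch; the figure of 500 is an arbitrary illustration, not a recommendation):

    from processing import Pool

    def cheap(x):
        return x + 1

    if __name__ == '__main__':
        pool = Pool(4)
        # chunksize=500 submits the iterable in slices of 500 items per task,
        # which cuts per-item overhead for cheap functions on long iterables
        results = list(pool.imap(cheap, range(100000), chunksize=500))
        pool.close()
        pool.join()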

  • Previously on Windows, when the cleanup code for a LocalManager attempted to unlink the name of the file which backs the shared memory map, an exception was raised if a child process still existed which had a handle open for that mmap. This is likely to happen if a daemon process inherits a LocalManager instance.

    Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin.

  • types.MethodType is registered with copy_reg so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.)

    This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object.

  • On Windows reduction.fromfd() now returns true instances of _socket.socket, so there is no more need for the _processing.falsesocket type.

Changes in 0.37

  • Updated metadata and documentation because the project is now hosted at developer.berlios.de/projects/pyprocessing.
  • The Pool.join() method has been removed. Pool.shutdown() will now join the worker processes automatically.
  • A pool object no longer participates in a reference cycle so Pool.shutdown() should get called as soon as its reference count falls to zero.
  • On Windows if enableLogging() was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, now specifying a handler type in enableLogging() will cause any previous handlers used by the logger to be discarded.

Changes in 0.36

  • In recent versions on Unix the finalizers in a manager process were never given a chance to run before os._exit() was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.

  • The shutting down of managers has been cleaned up.

  • In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed.

  • In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed.

  • Previously the arguments to processing.Semaphore() and processing.BoundedSemaphore() did not have any defaults. The defaults should be 1 to match threading. Fixed.

  • It should now be possible for a Windows Service created by using pywin32 to spawn processes using the processing package.

    Note that pywin32 apparently has a bug meaning that Py_Finalize() is never called when the service exits so functions registered with atexit never get a chance to run. Therefore it is advisable to explicitly call sys.exitfunc() or atexit._run_exitfuncs() at the end of ServiceFramework.DoSvcRun(). Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report.

  • Added getLogger() and enableLogging() to support logging.

Changes in 0.35

  • By default processes are no longer stoppable using the stop() method: one must call setStoppable(True) before start() in order to use the stop() method. (Note that terminate() will work regardless of whether the process is marked as being "stoppable".)

    The reason for this is that on Windows getting stop() to work involves starting a new console for the child process and installing a signal handler for the SIGBREAK signal. This unfortunately means that Ctrl-Break cannot be used to kill all processes of the program.

  • Added setStoppable() and getStoppable() methods -- see above.

  • Added BufferedQueue/BufferedPipeQueue/BufferedPosixQueue. Putting an object on a buffered queue will always succeed without blocking (just like with Queue.Queue if no maximum size is specified). This makes them potentially safer than the normal queue types provided by processing which have finite capacity and may cause deadlocks if they fill.

    test/test_worker.py has been updated to use BufferedQueue for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock.

  • Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of sem_timedwait(). This means that Condition.wait() and Queue.get() should now work with timeouts on Mac OS X.

  • Added a callback argument to Pool.apply_async().
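
    For example, a minimal sketch (elsewhere in this documentation the method is spelled applyAsync; the spelling here follows this entry):

        from processing import Pool

        def square(x):
            return x * x

        def report(value):
            print 'result:', value            # the callback runs in the parent process

        if __name__ == '__main__':
            pool = Pool(processes=2)
            pool.apply_async(square, [7], callback=report)
            pool.close()
            pool.join()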

  • Added test/test_httpserverpool.py which runs a pool of http servers which share a single listening socket.

  • Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket.

  • Fixed bug in the iterator returned by Pool.imap().

  • Fixed bug in Condition.__repr__().

  • Fixed a handle/file descriptor leak when sockets or connections are unpickled.

Changes in 0.34

  • Although the C extension in version 0.33 would compile on Mac OS X, trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the ImportError exception was silently swallowed.

    This is now fixed by using the NO_SEM_TIMED macro. Unfortunately this means that some methods like Condition.wait() and Queue.get() will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing.

  • Added a terminate() method to process objects which is more forceful than stop().

  • Fixed bug in the cleanup function registered with atexit which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. Thanks to Dominique Wahli for report and testing.

  • Added test/test_workers.py which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another.

  • Added processing.Pool() which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the map() builtin. This is still experimental and undocumented --- see test/test_pool.py for example usage.

Changes in 0.33

  • Added a recvbytes_into() method for receiving byte data into objects with the writable buffer interface. Also renamed the _recv_string() and _send_string() methods of connection objects to recvbytes() and sendbytes().

  • Some optimizations for the transferring of large blocks of data using connection objects.

  • On Unix os.sysconf() is now used by default to determine whether to compile in support for posix semaphores or posix message queues.

    By using the NO_SEM_TIMED and NO_MQ_TIMED macros (see INSTALL.txt) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions sem_timedwait() or mq_timedreceive() and mq_timedsend().

  • gettimeofday() is now used instead of clock_gettime() making compilation of the C extension (hopefully) possible on Mac OS X. No modification of setup.py should be necessary. Thanks to Michele Bertoldi for report and proposed patch.

  • cpuCount() function added which returns the number of CPUs in the system.

  • Bugfixes to PosixQueue class.

Changes in 0.32

  • Refactored and simplified _nonforking module -- info about sys.modules of parent process is no longer passed on to child process. Also pkgutil is no longer used.
  • Allocated space from an mmap used by LocalManager will now be recycled.
  • Better tests for LocalManager.
  • Fixed bug in managers.py concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report.
  • Added a freezeSupport() function. Calling this at the appropriate point in the main module is necessary when freezing a multiprocess program to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)
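
    For example, a minimal sketch of the usual calling pattern (assuming freezeSupport is importable from the processing namespace):

        from processing import Process, freezeSupport

        def worker():
            print 'hello from the child process'

        if __name__ == '__main__':
            freezeSupport()      # needed only when the program is frozen into a Windows executable
            p = Process(target=worker)
            p.start()
            p.join()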

Changes in 0.31

  • Fixed one line bug in localmanager.py which caused shared memory maps not to be resized properly.
  • Added tests for shared values/structs/arrays to test/test_processing.

Changes in 0.30

  • Process objects now support the complete API of thread objects.

    In particular isAlive(), isDaemon(), setDaemon() have been added and join() now supports the timeout parameter.

    There are also new methods stop(), getPid() and getExitCode().

  • Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores.

  • Added support for sharing simple objects between processes by using a shared memory map and the struct or array modules.

  • An activeChildren() function has been added to processing which returns a list of the child processes which are still alive.

  • A Pipe() function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent.

  • socket objects etc are now picklable and can be transferred between processes. (Requires compilation of the _processing extension.)

  • Subclasses of managers.BaseManager no longer automatically spawn a child process when an instance is created: the start() method must be called explicitly.

  • On Windows child processes are now spawned using subprocess.

  • On Windows the Python 2.5 version of pkgutil is now used for loading modules by the _nonforking module. On Python 2.4 this version of pkgutil (which uses the standard Python licence) is included in processing.compat.

  • The arguments to the functions in processing.connection have changed slightly.

  • Connection objects now have a poll() method which tests whether there is any data available for reading.

  • The test/py2exedemo folder shows how to get py2exe to create a Windows executable from a program using the processing package.

  • More tests.

  • Bugfixes.

  • Rearrangement of various stuff.

Changes in 0.21

  • By default a proxy is now only able to access those methods of its referent which have been explicitly exposed.
  • The connection sub-package now supports digest authentication.
  • Process objects are now given randomly generated 'inheritable' authentication keys.
  • A manager process will now only accept connections from processes using the same authentication key.
  • Previously get_module() from _nonforking.py was seriously messed up (though it generally worked). It is a lot saner now.
  • Python 2.4 or higher is now required.

Changes in 0.20

  • The doc folder contains HTML documentation.
  • test is now a subpackage. Running processing.test.main() will run test scripts using both processes and threads.
  • nonforking.py has been renamed _nonforking.py. manager.py has been renamed managers.py. connection.py has become a sub-package connection.
  • Listener and Client have been removed from processing, but still exist in processing.connection.
  • The package is now probably compatible with versions of Python earlier than 2.4.
  • set is no longer a type supported by the default manager type.
  • Many more changes.

Changes in 0.12

  • Fixed bug where the arguments to processing.Manager() were passed on to processing.manager.DefaultManager() in the wrong order.
  • processing.dummy is now a subpackage of processing instead of a module.
  • Rearranged package so that the test folder, README.txt and CHANGES.txt are copied when the package is installed.

Changes in 0.11

  • Fixed bug on windows when the full path of nonforking.py contains a space.
  • On unix there is no longer a need to make the arguments to the constructor of Process be picklable or for an instance of a subclass of Process to be picklable when you call the start method.
  • On unix proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to Process. (This will never be possible on windows.)
uqfoundation-multiprocess-b3457a5/pypy3.10/doc/COPYING.html000066400000000000000000000040211455552142400233630ustar00rootroot00000000000000

Copyright (c) 2006-2008, R Oudkerk

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

uqfoundation-multiprocess-b3457a5/pypy3.10/doc/INSTALL.html000066400000000000000000000063531455552142400233730ustar00rootroot00000000000000 Installation of processing

Installation of processing

Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the ctypes package (which comes automatically with Python 2.5).

Windows binary builds for Python 2.4 and Python 2.5 are available at

http://pyprocessing.berlios.de

or

http://pypi.python.org/pypi/processing

Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:

python setup.py install

It should not be necessary to do any editing of setup.py if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the macros dictionary or libraries list. The section to modify reads

else:
    macros = dict(
        HAVE_SEM_OPEN=1,
        HAVE_SEM_TIMEDWAIT=1,
        HAVE_FD_TRANSFER=1
        )
    libraries = ['rt']

More details can be found in the comments in setup.py.

Note that if you use HAVE_SEM_OPEN=0 then support for posix semaphores will not be compiled in, and then many of the functions in the processing namespace like Lock() or Queue() will not be available. However, one can still create a manager using manager = processing.Manager() and then do lock = manager.Lock() etc.
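
For example, a minimal sketch of the manager-based fallback (the objects created are illustrative):

import processing

if __name__ == '__main__':
    manager = processing.Manager()    # starts a server process even without posix semaphore support
    lock = manager.Lock()             # proxy objects served by the manager
    queue = manager.Queue()
    lock.acquire()
    queue.put('hello')
    print queue.get()                 # => 'hello'
    lock.release()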

Running tests

To run the test scripts using Python 2.5 do

python -m processing.tests

and on Python 2.4 do

python -c "from processing.tests import main; main()"

This will run a number of test scripts using both processes and threads.

uqfoundation-multiprocess-b3457a5/pypy3.10/doc/THANKS.html000066400000000000000000000017751455552142400232600ustar00rootroot00000000000000 Thanks

Thanks

Thanks to everyone who has offered bug reports, patches, suggestions:

Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.

Sorry if I have forgotten anyone.

uqfoundation-multiprocess-b3457a5/pypy3.10/doc/__init__.py000066400000000000000000000004001455552142400234730ustar00rootroot00000000000000import os import webbrowser def main(): ''' Show html documentation using webbrowser ''' index_html = os.path.join(os.path.dirname(__file__), 'index.html') webbrowser.open(index_html) if __name__ == '__main__': main() uqfoundation-multiprocess-b3457a5/pypy3.10/doc/connection-objects.html000066400000000000000000000152041455552142400260460ustar00rootroot00000000000000 Connection objects
Prev         Up         Next

Connection objects

Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.

Connection objects are usually created using processing.Pipe() -- see also Listener and Clients.

Connection objects have the following methods:

send(obj)

Send an object to the other end of the connection which should be read using recv().

The object must be picklable.

recv()
Return an object sent from the other end of the connection using send(). Raises EOFError if there is nothing left to receive and the other end was closed.
fileno()
Returns the file descriptor or handle used by the connection.
close()

Close the connection.

This is called automatically when the connection is garbage collected.

poll(timeout=0.0)

Return whether there is any data available to be read within timeout seconds.

If timeout is None then an infinite timeout is used.

Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C.

sendBytes(buffer)

Send byte data from an object supporting the buffer interface as a complete message.

Can be used to send strings or a view returned by buffer().

recvBytes()
Return a complete message of byte data sent from the other end of the connection as a string. Raises EOFError if there is nothing left to receive and the other end was closed.
recvBytesInto(buffer, offset=0)

Read into buffer at position offset a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises EOFError if there is nothing left to receive and the other end was closed.

buffer must be an object satisfying the writable buffer interface and offset must be non-negative and less than the length of buffer (in bytes).

If the buffer is too short then a BufferTooShort exception is raised and the complete message is available as e.args[0] where e is the exception instance.

For example:

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.sendBytes('thank you')
>>> a.recvBytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.sendBytes(arr1)
>>> count = b.recvBytesInto(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
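
The poll() method can be used in the same way to check for pending data before blocking in recv() (a minimal sketch):

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.poll()                      # nothing has been sent yet
False
>>> b.send('ping')
>>> a.poll(1.0)                   # wait up to one second for data
True
>>> a.recv()
'ping'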

Warning

The recv() method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message.

Therefore, unless the connection object was produced using Pipe() you should only use the recv() and send() methods after performing some sort of authentication. See Authentication keys.

Warning

If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie.

uqfoundation-multiprocess-b3457a5/pypy3.10/doc/connection-objects.txt000066400000000000000000000072761455552142400257330ustar00rootroot00000000000000.. include:: header.txt ==================== Connection objects ==================== Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects usually created using `processing.Pipe()` -- see also `Listener and Clients `_. Connection objects have the following methods: `send(obj)` Send an object to the other end of the connection which should be read using `recv()`. The object must be picklable. `recv()` Return an object sent from the other end of the connection using `send()`. Raises `EOFError` if there is nothing left to receive and the other end was closed. `fileno()` Returns the file descriptor or handle used by the connection. `close()` Close the connection. This is called automatically when the connection is garbage collected. `poll(timeout=0.0)` Return whether there is any data available to be read within `timeout` seconds. If `timeout` is `None` then an infinite timeout is used. Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C. `sendBytes(buffer)` Send byte data from an object supporting the buffer interface as a complete message. Can be used to send strings or a view returned by `buffer()`. `recvBytes()` Return a complete message of byte data sent from the other end of the connection as a string. Raises `EOFError` if there is nothing left to receive and the other end was closed. `recvBytesInto(buffer, offset=0)` Read into `buffer` at position `offset` a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises `EOFError` if there is nothing left to receive and the other end was closed. `buffer` must be an object satisfying the writable buffer interface and `offset` must be non-negative and less than the length of `buffer` (in bytes). If the buffer is too short then a `BufferTooShort` exception is raised and the complete message is available as `e.args[0]` where `e` is the exception instance. For example: >>> from processing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.sendBytes('thank you') >>> a.recvBytes() 'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.sendBytes(arr1) >>> count = b.recvBytesInto(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The `recv()` method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using `Pipe()` you should only use the `recv()` and `send()` methods after performing some sort of authentication. See `Authentication keys `_. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie. .. _Prev: queue-objects.html .. _Up: processing-ref.html .. _Next: manager-objects.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/connection-ref.html000066400000000000000000000357371455552142400252060ustar00rootroot00000000000000 Listeners and Clients
Prev         Up         Next

Listeners and Clients

Usually message passing between processes is done using queues or by using connection objects returned by Pipe().

However, the processing.connection module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for digest authentication using the hmac module from the standard library.

Classes and functions

The module defines the following functions:

Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)
Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections.
Client(address, family=None, authenticate=False, authkey=None)

Attempts to set up a connection to the listener which is using address address, returning a connection object.

The type of the connection is determined by the family argument, but this can generally be omitted since it can usually be inferred from the format of address.

If authentication or authkey is a string then digest authentication is used. The key used for authentication will be either authkey or currentProcess.getAuthKey() if authkey is None. If authentication fails then AuthenticationError is raised. See Authentication keys.

The module exports two exception types:

exception AuthenticationError
Exception raised when there is an authentication error.
exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Listener objects

Instances of Listener have the following methods:

__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)
address
The address to be used by the bound socket or named pipe of the listener object.
family

The type of the socket (or named pipe) to use.

This can be one of the strings 'AF_INET' (for a TCP socket), 'AF_UNIX' (for a Unix domain socket) or 'AF_PIPE' (for a Windows named pipe). Of these only the first is guaranteed to be available.

If family is None then the family is inferred from the format of address. If address is also None then a default is chosen. This default is the family which is assumed to be the fastest available. See Address formats.

Note that if family is 'AF_UNIX' then the associated file will only be readable/writable by the user running the current process -- use os.chmod() if you need to let other users access the socket.

backlog
If the listener object uses a socket then backlog is passed to the listen() method of the socket once it has been bound.
authenticate
If authenticate is true or authkey is not None then digest authentication is used.
authkey

If authkey is a string then it will be used as the authentication key; otherwise it must be None.

If authkey is None and authenticate is true then currentProcess.getAuthKey() is used as the authentication key.

If authkey is None and authentication is false then no authentication is done.

If authentication fails then AuthenticationError is raised. See Authentication keys.

accept()

Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then AuthenticationError is raised.

Returns a connection object.

close()

Close the bound socket or named pipe of the listener object.

This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.

Listener objects have the following read-only properties:

address
The address which is being used by the listener object.
last_accepted

The address from which the last accepted connection came.

If this is unavailable then None is returned.

Address formats

  • An 'AF_INET' address is a tuple of the form (hostname, port) where hostname is a string and port is an integer

  • An 'AF_UNIX' address is a string representing a filename on the filesystem.

  • An 'AF_PIPE' address is a string of the form r'\\.\pipe\PipeName'.

    To use Client to connect to a named pipe on a remote computer called ServerName one should use an address of the form r'\\ServerName\pipe\PipeName' instead.

Note that any string beginning with two backslashes is assumed by default to be an 'AF_PIPE' address rather than an 'AF_UNIX' address.
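
For example, a minimal sketch using an 'AF_UNIX' address on Unix (the socket path and message are illustrative):

from processing import Process
from processing.connection import Listener, Client

address = '/tmp/processing-demo.sock'        # hypothetical filesystem path

def child():
    conn = Client(address)                   # family inferred from the address format
    print conn.recv()                        # => 'ping'
    conn.close()

if __name__ == '__main__':
    listener = Listener(address, family='AF_UNIX')
    p = Process(target=child)
    p.start()
    conn = listener.accept()
    conn.send('ping')
    conn.close()
    p.join()
    listener.close()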

Authentication keys

When one uses the recv() method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore Listener and Client use the hmac module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does not involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the return value of currentProcess().getAuthKey() is used (see Process objects). This value will automatically be inherited by any Process object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves.

Suitable authentication keys can also be generated by using os.urandom().

Example

The following server code creates a listener which uses 'secret password' as an authentication key. It then waits for a connection and sends some data to the client:

from processing.connection import Listener
from array import array

address = ('localhost', 6000)     # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')

conn = listener.accept()
print 'connection accepted from', listener.last_accepted

conn.send([2.25, None, 'junk', float])

conn.sendBytes('hello')

conn.sendBytes(array('i', [42, 1729]))

conn.close()
listener.close()

The following code connects to the server and receives some data from the server:

from processing.connection import Client
from array import array

address = ('localhost', 6000)
conn = Client(address, authkey='secret password')

print conn.recv()                 # => [2.25, None, 'junk', float]

print conn.recvBytes()            # => 'hello'

arr = array('i', [0, 0, 0, 0, 0])
print conn.recvBytesInto(arr)    # => 8
print arr                         # => array('i', [42, 1729, 0, 0, 0])

conn.close()
uqfoundation-multiprocess-b3457a5/pypy3.10/doc/connection-ref.txt000066400000000000000000000210001455552142400250330ustar00rootroot00000000000000.. include:: header.txt ======================= Listeners and Clients ======================= Usually message passing between processes is done using queues or by using connection objects returned by `Pipe()`. However, the `processing.connection` module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for *digest authentication* using the `hmac` module from the standard library. Classes and functions ===================== The module defines the following functions: `Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)` Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections. `Client(address, family=None, authenticate=False, authkey=None)` Attempts to set up a connection to the listener which is using address `address`, returning a `connection object `_. The type of the connection is determined by `family` argument, but this can generally be omitted since it can usually be inferred from the format of `address`. If `authentication` or `authkey` is a string then digest authentication is used. The key used for authentication will be either `authkey` or `currentProcess.getAuthKey()` if `authkey` is `None`. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. .. `deliverChallenge(connection, authkey)` Sends a randomly generated message to the other end of the connection and waits for a reply. If the reply matches the digest of the message using `authkey` as the key then a welcome message is sent to the other end of the connection. Otherwise `AuthenticationError` is raised. `answerChallenge(connection, authkey)` Receives a message, calculates the digest of the message using `authkey` as the key, and then sends the digest back. If a welcome message is not received then `AuthenticationError` is raised. The module exports two exception types: **exception** `AuthenticationError` Exception raised when there is an authentication error. **exception** `BufferTooShort` Exception raise by the `recvBytesInto()` method of a connection object when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Listener objects ================ Instances of `Listener` have the following methods: `__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)` `address` The address to be used by the bound socket or named pipe of the listener object. `family` The type of the socket (or named pipe) to use. This can be one of the strings `'AF_INET'` (for a TCP socket), `'AF_UNIX'` (for a Unix domain socket) or `'AF_PIPE'` (for a Windows named pipe). Of these only the first is guaranteed to be available. If `family` is `None` than the family is inferred from the format of `address`. If `address` is also `None` then a default is chosen. This default is the family which is assumed to be the fastest available. See `Address formats`_. Note that if `family` is `'AF_UNIX'` then the associated file will have only be readable/writable by the user running the current process -- use `os.chmod()` is you need to let other users access the socket. `backlog` If the listener object uses a socket then `backlog` is passed to the `listen()` method of the socket once it has been bound. 
`authenticate` If `authenticate` is true or `authkey` is not `None` then digest authentication is used. `authkey` If `authkey` is a string then it will be used as the authentication key; otherwise it must be `None`. If `authkey` is `None` and `authenticate` is true then `currentProcess.getAuthKey()` is used as the authentication key. If `authkey` is `None` and `authentication` is false then no authentication is done. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. `accept()` Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then `AuthenticationError` is raised. Returns a `connection object ` object. `close()` Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: `address` The address which is being used by the listener object. `last_accepted` The address from which the last accepted connection came. If this is unavailable then `None` is returned. Address formats =============== * An `'AF_INET'` address is a tuple of the form `(hostname, port)` where `hostname` is a string and `port` is an integer * An `'AF_UNIX'` address is a string representing a filename on the filesystem. * An `'AF_PIPE'` address is a string of the form `r'\\\\.\\pipe\\PipeName'`. To use `Client` to connect to a named pipe on a remote computer called `ServerName` one should use an address of the form `r'\\\\ServerName\\pipe\\PipeName'` instead. Note that any string beginning with two backslashes is assumed by default to be an `'AF_PIPE'` address rather than an `'AF_UNIX'` address. Authentication keys =================== When one uses the `recv()` method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore `Listener` and `Client` use the `hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does *not* involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of `currentProcess().getAuthKey()` is used (see `Process objects `_). This value will automatically inherited by any `Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between the themselves. Suitable authentication keys can also be generated by using `os.urandom()`. Example ======= The following server code creates a listener which uses `'secret password'` as an authentication key. 
It then waits for a connection and sends some data to the client:: from processing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey='secret password') conn = listener.accept() print 'connection accepted from', listener.last_accepted conn.send([2.25, None, 'junk', float]) conn.sendBytes('hello') conn.sendBytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from processing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey='secret password') print conn.recv() # => [2.25, None, 'junk', float] print conn.recvBytes() # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print conn.recvBytesInto(arr) # => 8 print arr # => array('i', [42, 1729, 0, 0, 0]) conn.close() .. _Prev: sharedctypes.html .. _Up: processing-ref.html .. _Next: programming-guidelines.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/header.txt000066400000000000000000000003401455552142400233560ustar00rootroot00000000000000.. default-role:: literal .. header:: Prev_ |spaces| Up_ |spaces| Next_ .. footer:: Prev_ |spaces| Up_ |spaces| Next_ .. |nbsp| unicode:: U+000A0 .. |spaces| replace:: |nbsp| |nbsp| |nbsp| |nbsp| uqfoundation-multiprocess-b3457a5/pypy3.10/doc/html4css1.css000066400000000000000000000126361455552142400237340ustar00rootroot00000000000000/* :Author: David Goodger :Contact: goodger@users.sourceforge.net :Date: $Date: 2008/01/29 22:14:02 $ :Revision: $Revision: 1.1.1.1 $ :Copyright: This stylesheet has been placed in the public domain. Default cascading style sheet for the HTML output of Docutils. See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to customize this style sheet. */ /* used to remove borders from tables and images */ .borderless, table.borderless td, table.borderless th { border: 0 } table.borderless td, table.borderless th { /* Override padding for "table.docutils td" with "! important". The right padding separates the table cells. */ padding: 0 0.5em 0 0 ! important } .first { /* Override more specific margin styles with "! important". */ margin-top: 0 ! important } .last, .with-subtitle { margin-bottom: 0 ! important } .hidden { display: none } a.toc-backref { text-decoration: none ; color: black } blockquote.epigraph { margin: 2em 5em ; } dl.docutils dd { margin-bottom: 0.5em } /* Uncomment (and remove this text!) to get bold-faced definition list terms dl.docutils dt { font-weight: bold } */ div.abstract { margin: 2em 5em } div.abstract p.topic-title { font-weight: bold ; text-align: center } div.admonition, div.attention, div.caution, div.danger, div.error, div.hint, div.important, div.note, div.tip, div.warning { margin: 2em ; border: medium outset ; padding: 1em } div.admonition p.admonition-title, div.hint p.admonition-title, div.important p.admonition-title, div.note p.admonition-title, div.tip p.admonition-title { font-weight: bold ; font-family: sans-serif } div.attention p.admonition-title, div.caution p.admonition-title, div.danger p.admonition-title, div.error p.admonition-title, div.warning p.admonition-title { color: red ; font-weight: bold ; font-family: sans-serif } /* Uncomment (and remove this text!) to get reduced vertical space in compound paragraphs. 
div.compound .compound-first, div.compound .compound-middle { margin-bottom: 0.5em } div.compound .compound-last, div.compound .compound-middle { margin-top: 0.5em } */ div.dedication { margin: 2em 5em ; text-align: center ; font-style: italic } div.dedication p.topic-title { font-weight: bold ; font-style: normal } div.figure { margin-left: 2em ; margin-right: 2em } div.footer, div.header { clear: both; font-size: smaller } div.line-block { display: block ; margin-top: 1em ; margin-bottom: 1em } div.line-block div.line-block { margin-top: 0 ; margin-bottom: 0 ; margin-left: 1.5em } div.sidebar { margin-left: 1em ; border: medium outset ; padding: 1em ; background-color: #ffffee ; width: 40% ; float: right ; clear: right } div.sidebar p.rubric { font-family: sans-serif ; font-size: medium } div.system-messages { margin: 5em } div.system-messages h1 { color: red } div.system-message { border: medium outset ; padding: 1em } div.system-message p.system-message-title { color: red ; font-weight: bold } div.topic { margin: 2em } h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { margin-top: 0.4em } h1.title { text-align: center } h2.subtitle { text-align: center } hr.docutils { width: 75% } img.align-left { clear: left } img.align-right { clear: right } ol.simple, ul.simple { margin-bottom: 1em } ol.arabic { list-style: decimal } ol.loweralpha { list-style: lower-alpha } ol.upperalpha { list-style: upper-alpha } ol.lowerroman { list-style: lower-roman } ol.upperroman { list-style: upper-roman } p.attribution { text-align: right ; margin-left: 50% } p.caption { font-style: italic } p.credits { font-style: italic ; font-size: smaller } p.label { white-space: nowrap } p.rubric { font-weight: bold ; font-size: larger ; color: maroon ; text-align: center } p.sidebar-title { font-family: sans-serif ; font-weight: bold ; font-size: larger } p.sidebar-subtitle { font-family: sans-serif ; font-weight: bold } p.topic-title { font-weight: bold } pre.address { margin-bottom: 0 ; margin-top: 0 ; font-family: serif ; font-size: 100% } pre.literal-block, pre.doctest-block { margin-left: 2em ; margin-right: 2em ; background-color: #eeeeee } span.classifier { font-family: sans-serif ; font-style: oblique } span.classifier-delimiter { font-family: sans-serif ; font-weight: bold } span.interpreted { font-family: sans-serif } span.option { white-space: nowrap } span.pre { white-space: pre } span.problematic { color: red } span.section-subtitle { /* font-size relative to parent (h1..h6 element) */ font-size: 80% } table.citation { border-left: solid 1px gray; margin-left: 1px } table.docinfo { margin: 2em 4em } table.docutils { margin-top: 0.5em ; margin-bottom: 0.5em } table.footnote { border-left: solid 1px black; margin-left: 1px } table.docutils td, table.docutils th, table.docinfo td, table.docinfo th { padding-left: 0.5em ; padding-right: 0.5em ; vertical-align: top } table.docutils th.field-name, table.docinfo th.docinfo-name { font-weight: bold ; text-align: left ; white-space: nowrap ; padding-left: 0 } h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { font-size: 100% } /* tt.docutils { background-color: #eeeeee } */ ul.auto-toc { list-style-type: none } uqfoundation-multiprocess-b3457a5/pypy3.10/doc/index.html000066400000000000000000000064761455552142400234020ustar00rootroot00000000000000 Documentation for processing-0.52
Prev         Up         Next
uqfoundation-multiprocess-b3457a5/pypy3.10/doc/index.txt000066400000000000000000000021751455552142400232450ustar00rootroot00000000000000.. include:: header.txt .. include:: version.txt ======================================== Documentation for processing-|version| ======================================== :Author: R Oudkerk :Contact: roudkerk at users.berlios.de :Url: http://developer.berlios.de/projects/pyprocessing :Licence: BSD Licence Contents ======== * `Introduction `_ * `Package reference `_ + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes objects `_ + `Listeners and Clients `_ * `Programming guidelines `_ * `Tests and examples `_ See also ======== * `Installation instructions `_ * `Changelog `_ * `Acknowledgments `_ * `Licence `_ .. _Next: intro.html .. _Up: index.html .. _Prev: index.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/intro.html000066400000000000000000000427461455552142400234260ustar00rootroot00000000000000 Introduction
Prev         Up         Next

Introduction

Threads, processes and the GIL

To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads.

Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient.

On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other.

CPython has a Global Interpreter Lock (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C.

One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead.

Python gives little direct support for writing programs using multiple processes. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs.

Forking and spawning

There are two ways of creating a new process in Python:

  • The current process can fork a new child process by using the os.fork() function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits copies of all variables that the parent process had.

    However, os.fork() is not available on every platform: in particular Windows does not support it.

  • Alternatively, the current process can spawn a completely new Python interpreter by using the subprocess module or one of the os.spawn*() functions.

    Getting this new interpreter into a fit state to perform the task set for it by its parent process is, however, a bit of a challenge.

The processing package uses os.fork() if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process.

The Process class

In the processing package processes are spawned by creating a Process object and then calling its start() method. processing.Process follows the API of threading.Thread. A trivial example of a multiprocess program is

from processing import Process

def f(name):
    print 'hello', name

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

Here the function f is run in a child process.

For an explanation of why (on Windows) the if __name__ == '__main__' part is necessary see Programming guidelines.

Exchanging objects between processes

processing supports two types of communication channel between processes:

Queues:

The function Queue() returns a near clone of Queue.Queue -- see the Python standard documentation. For example

from processing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()

Queues are thread and process safe. See Queues.

Pipes:

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example

from processing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print parent_conn.recv()   # prints "[42, None, 'hello']"
    p.join()

The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See Pipes.

Synchronization between processes

processing contains equivalents of all the synchronization primitives from threading. For instance one can use a lock to ensure that only one process prints to standard output at a time:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()

    for num in range(10):
        Process(target=f, args=(lock, num)).start()

Without using the lock output from the different processes is liable to get all mixed up.

Sharing state between processes

As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes.

However, if you really do need to use some shared data then processing provides a couple of ways of doing so.

Shared memory:

Data can be stored in a shared memory map using Value or Array. For example the following code

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

will print

3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

The 'd' and 'i' arguments used when creating num and arr are typecodes of the kind used by the array module: 'd' indicates a double precision float and 'i' indicates a signed integer. These shared objects will be process and thread safe.

For more flexibility in using shared memory one can use the processing.sharedctypes module which supports the creation of arbitrary ctypes objects allocated from shared memory.

Server process:

A manager object returned by Manager() controls a server process which holds python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue, Value and Array. For example:

from processing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    print d
    print l

will print

{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Creating managers which support other types is not hard --- see Customized managers.

Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See Server process managers.

Using a pool of workers

The Pool() function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways.

For example:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes
    result = pool.applyAsync(f, [10])     # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow
    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

See Process pools.

Speed

The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see benchmarks.py.

Number of 256 byte string objects passed between processes/threads per sec:

Connection type Windows Linux
Queue.Queue 49,000 17,000-50,000 [1]
processing.Queue 22,000 21,000
Queue managed by server 6,900 6,500
processing.Pipe 52,000 57,000
[1]For some reason the performance of Queue.Queue is very variable on Linux.

Number of acquires/releases of a lock per sec:

Lock type Windows Linux
threading.Lock 850,000 560,000
processing.Lock 420,000 510,000
Lock managed by server 10,000 8,400
threading.RLock 93,000 76,000
processing.RLock 420,000 500,000
RLock managed by server 8,800 7,400

Number of interleaved waits/notifies per sec on a condition variable by two processes:

Condition type Windows Linux
threading.Condition 27,000 31,000
processing.Condition 26,000 25,000
Condition managed by server 6,600 6,000

Number of integers retrieved from a sequence per sec:

Sequence type Windows Linux
list 6,400,000 5,100,000
unsynchronized shared array 3,900,000 3,100,000
synchronized shared array 200,000 220,000
list managed by server 20,000 17,000
uqfoundation-multiprocess-b3457a5/pypy3.10/doc/intro.txt000066400000000000000000000301551455552142400232700ustar00rootroot00000000000000.. include:: header.txt ============== Introduction ============== Threads, processes and the GIL ============================== To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads. Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient. On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other. CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C. One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead. Python gives little direct support for writing programs using multiple process. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs. Forking and spawning ==================== There are two ways of creating a new process in Python: * The current process can *fork* a new child process by using the `os.fork()` function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits *copies* of all variables that the parent process had. However, `os.fork()` is not available on every platform: in particular Windows does not support it. * Alternatively, the current process can spawn a completely new Python interpreter by using the `subprocess` module or one of the `os.spawn*()` functions. Getting this new interpreter in to a fit state to perform the task set for it by its parent process is, however, a bit of a challenge. The `processing` package uses `os.fork()` if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process. The Process class ================= In the `processing` package processes are spawned by creating a `Process` object and then calling its `start()` method. `processing.Process` follows the API of `threading.Thread`. A trivial example of a multiprocess program is :: from processing import Process def f(name): print 'hello', name if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() Here the function `f` is run in a child process. For an explanation of why (on Windows) the `if __name__ == '__main__'` part is necessary see `Programming guidelines `_. 
Exchanging objects between processes ==================================== `processing` supports two types of communication channel between processes: **Queues**: The function `Queue()` returns a near clone of `Queue.Queue` -- see the Python standard documentation. For example :: from processing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print q.get() # prints "[42, None, 'hello']" p.join() Queues are thread and process safe. See `Queues `_. **Pipes**: The `Pipe()` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example :: from processing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print parent_conn.recv() # prints "[42, None, 'hello']" p.join() The two connection objects returned by `Pipe()` represent the two ends of the pipe. Each connection object has `send()` and `recv()` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See `Pipes `_. Synchronization between processes ================================= `processing` contains equivalents of all the synchronization primitives from `threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from processing import Process, Lock def f(l, i): l.acquire() print 'hello world', i l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes =============================== As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then `processing` provides a couple of ways of doing so. **Shared memory**: Data can be stored in a shared memory map using `Value` or `Array`. For example the following code :: from processing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print num.value print arr[:] will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The `'d'` and `'i'` arguments used when creating `num` and `arr` are typecodes of the kind used by the `array` module: `'d'` indicates a double precision float and `'i'` inidicates a signed integer. These shared objects will be process and thread safe. For more flexibility in using shared memory one can use the `processing.sharedctypes` module which supports the creation of arbitrary `ctypes objects allocated from shared memory `_. **Server process**: A manager object returned by `Manager()` controls a server process which holds python objects and allows other processes to manipulate them using proxies. A manager returned by `Manager()` will support types `list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value` and `Array`. 
For example:: from processing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print d print l will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Creating managers which support other types is not hard --- see `Customized managers `_. Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See `Server process managers `_. Using a pool of workers ======================= The `Pool()` function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" See `Process pools `_. Speed ===== The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see `benchmarks.py <../examples/benchmarks.py>`_. *Number of 256 byte string objects passed between processes/threads per sec*: ================================== ========== ================== Connection type Windows Linux ================================== ========== ================== Queue.Queue 49,000 17,000-50,000 [1]_ processing.Queue 22,000 21,000 Queue managed by server 6,900 6,500 processing.Pipe 52,000 57,000 ================================== ========== ================== .. [1] For some reason the performance of `Queue.Queue` is very variable on Linux. *Number of acquires/releases of a lock per sec*: ============================== ========== ========== Lock type Windows Linux ============================== ========== ========== threading.Lock 850,000 560,000 processing.Lock 420,000 510,000 Lock managed by server 10,000 8,400 threading.RLock 93,000 76,000 processing.RLock 420,000 500,000 RLock managed by server 8,800 7,400 ============================== ========== ========== *Number of interleaved waits/notifies per sec on a condition variable by two processes*: ============================== ========== ========== Condition type Windows Linux ============================== ========== ========== threading.Condition 27,000 31,000 processing.Condition 26,000 25,000 Condition managed by server 6,600 6,000 ============================== ========== ========== *Number of integers retrieved from a sequence per sec*: ============================== ========== ========== Sequence type Windows Linux ============================== ========== ========== list 6,400,000 5,100,000 unsynchornized shared array 3,900,000 3,100,000 synchronized shared array 200,000 220,000 list managed by server 20,000 17,000 ============================== ========== ========== .. _Prev: index.html .. _Up: index.html .. _Next: processing-ref.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/manager-objects.html000066400000000000000000000440461455552142400253270ustar00rootroot00000000000000 Manager objects
Prev         Up         Next

Manager objects

A manager object controls a server process which manages shared objects. Other processes can access the shared objects by using proxies.

Manager processes will be shut down as soon as they are garbage collected or their parent process exits. The manager classes are defined in the processing.managers module.

BaseManager

BaseManager is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects.

The public methods of BaseManager are the following:

__init__(self, address=None, authkey=None)

Creates a manager object.

Once created one should call start() or serveForever() to ensure that the manager object refers to a started manager process.

The arguments to the constructor are as follows:

address

The address on which the manager process listens for new connections. If address is None then an arbitrary one is chosen.

See Listener objects.

authkey

The authentication key which will be used to check the validity of incoming connections to the server process.

If authkey is None then currentProcess().getAuthKey() is used. Otherwise authkey is used and it must be a string.

See Authentication keys.

start()
Spawn or fork a subprocess to start the manager.
serveForever()
Start the manager in the current process. See Using a remote manager.
fromAddress(address, authkey)
A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See Using a remote manager.
shutdown()

Stop the process used by the manager. This is only available if start() has been used to start the server process.

This can be called multiple times.

BaseManager instances also have one read-only property:

address
The address used by the manager.

The creation of managers which support arbitrary types is discussed below in Customized managers.

SyncManager

SyncManager is a subclass of BaseManager which can be used for the synchronization of processes. Objects of this type are returned by processing.Manager().

It also supports creation of shared lists and dictionaries. The instance methods defined by SyncManager are

BoundedSemaphore(value=1)
Creates a shared threading.BoundedSemaphore object and returns a proxy for it.
Condition(lock=None)

Creates a shared threading.Condition object and returns a proxy for it.

If lock is supplied then it should be a proxy for a threading.Lock or threading.RLock object.

Event()
Creates a shared threading.Event object and returns a proxy for it.
Lock()
Creates a shared threading.Lock object and returns a proxy for it.
Namespace()

Creates a shared Namespace object and returns a proxy for it.

See Namespace objects.

Queue(maxsize=0)
Creates a shared Queue.Queue object and returns a proxy for it.
RLock()
Creates a shared threading.RLock object and returns a proxy for it.
Semaphore(value=1)
Creates a shared threading.Semaphore object and returns a proxy for it.
Array(typecode, sequence)
Creates an array and returns a proxy for it. (format is ignored.)
Value(typecode, value)
Creates an object with a writable value attribute and returns a proxy for it.
dict(), dict(mapping), dict(sequence)
Creates a shared dict object and returns a proxy for it.
list(), list(sequence)
Creates a shared list object and returns a proxy for it.
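
As a brief sketch of how these proxies might be used together (the worker function and values are illustrative, not part of the package):

from processing import Process, Manager

def worker(q, i):
    q.put(i*i)

if __name__ == '__main__':
    manager = Manager()
    q = manager.Queue()             # a proxy for a shared queue
    procs = [Process(target=worker, args=(q, i)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print sorted(q.get() for i in range(4))    # prints "[0, 1, 4, 9]"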

Namespace objects

A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes.

However, when using a proxy for a namespace object, an attribute beginning with '_' will be an attribute of the proxy and not an attribute of the referent:

>>> manager = processing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3    # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')

Customized managers

To create one's own manager one creates a subclass of BaseManager.

To create a method of the subclass which will create new shared objects one uses the following function:

CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)

Returns a function with signature func(self, *args, **kwds) which will create a shared object using the manager self and return a proxy for it.

The shared objects will be created by evaluating callable(*args, **kwds) in the manager process.

The arguments are:

callable
The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored.
proxytype

The type of proxy which will be used for objects returned by callable.

If proxytype is None then each time an object is returned by callable either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the exposed argument, see below.

exposed

Given a shared object returned by callable, the exposed argument is the list of those method names which should be exposed via BaseProxy._callMethod(). [1] [2]

If exposed is None and callable.__exposed__ exists then callable.__exposed__ is used instead.

If exposed is None and callable.__exposed__ does not exist then all methods of the shared object which do not start with '_' will be exposed.

An attempt to use BaseProxy._callMethod() with a method name which is not exposed will raise an exception.

typeid
If typeid is a string then it is used as an identifier for the callable. Otherwise, typeid must be None and a string prefixed by callable.__name__ is used as the identifier.
[1] A method here means any attribute which has a __call__ attribute.
[2] The method names __repr__, __str__, and __cmp__ of a shared object are always exposed by the manager. However, instead of invoking the __repr__(), __str__(), __cmp__() instance methods (none of which are guaranteed to exist) they invoke the builtin functions repr(), str() and cmp().

Note that one should generally avoid exposing rich comparison methods like __eq__(), __ne__(), __le__(). To make the proxy type support comparison by value one can just expose __cmp__() instead (even if the referent does not have such a method).

Example

from processing.managers import BaseManager, CreatorMethod

class FooClass(object):
    def bar(self):
        print 'BAR'
    def baz(self):
        print 'BAZ'

class NewManager(BaseManager):
    Foo = CreatorMethod(FooClass)

if __name__ == '__main__':
    manager = NewManager()
    manager.start()
    foo = manager.Foo()
    foo.bar()               # prints 'BAR'
    foo.baz()               # prints 'BAZ'
    manager.shutdown()

See ex_newtype.py for more examples.

Using a remote manager

It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it).

Running the following commands creates a server for a shared queue which remote clients can use:

>>> from processing.managers import BaseManager, CreatorMethod
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy')
...
>>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none')
>>> m.serveForever()

One client can access the server as follows:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.put('hello')

Another client can also use it:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.get()
'hello'
uqfoundation-multiprocess-b3457a5/pypy3.10/doc/manager-objects.txt000066400000000000000000000235161455552142400252010ustar00rootroot00000000000000.. include:: header.txt ================= Manager objects ================= A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the `processing.managers` module. BaseManager =========== `BaseManager` is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects. The public methods of `BaseManager` are the following: `__init__(self, address=None, authkey=None)` Creates a manager object. Once created one should call `start()` or `serveForever()` to ensure that the manager object refers to a started manager process. The arguments to the constructor are as follows: `address` The address on which the manager process listens for new connections. If `address` is `None` then an arbitrary one is chosen. See `Listener objects `_. `authkey` The authentication key which will be used to check the validity of incoming connections to the server process. If `authkey` is `None` then `currentProcess().getAuthKey()`. Otherwise `authkey` is used and it must be a string. See `Authentication keys `_. `start()` Spawn or fork a subprocess to start the manager. `serveForever()` Start the manager in the current process. See `Using a remote manager`_. `fromAddress(address, authkey)` A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See `Using a remote manager`_. `shutdown()` Stop the process used by the manager. This is only available if `start()` has been used to start the server process. This can be called multiple times. `BaseManager` instances also have one read-only property: `address` The address used by the manager. The creation of managers which support arbitrary types is discussed below in `Customized managers`_. SyncManager =========== `SyncManager` is a subclass of `BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by `processing.Manager()`. It also supports creation of shared lists and dictionaries. The instance methods defined by `SyncManager` are `BoundedSemaphore(value=1)` Creates a shared `threading.BoundedSemaphore` object and returns a proxy for it. `Condition(lock=None)` Creates a shared `threading.Condition` object and returns a proxy for it. If `lock` is supplied then it should be a proxy for a `threading.Lock` or `threading.RLock` object. `Event()` Creates a shared `threading.Event` object and returns a proxy for it. `Lock()` Creates a shared `threading.Lock` object and returns a proxy for it. `Namespace()` Creates a shared `Namespace` object and returns a proxy for it. See `Namespace objects`_. `Queue(maxsize=0)` Creates a shared `Queue.Queue` object and returns a proxy for it. `RLock()` Creates a shared `threading.RLock` object and returns a proxy for it. `Semaphore(value=1)` Creates a shared `threading.Semaphore` object and returns a proxy for it. `Array(typecode, sequence)` Create an array and returns a proxy for it. (`format` is ignored.) `Value(typecode, value)` Create an object with a writable `value` attribute and returns a proxy for it. 
`dict()`, `dict(mapping)`, `dict(sequence)` Creates a shared `dict` object and returns a proxy for it. `list()`, `list(sequence)` Creates a shared `list` object and returns a proxy for it. Namespace objects ----------------- A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with `'_'` will be an attribute of the proxy and not an attribute of the referent:: >>> manager = processing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print Global Namespace(x=10, y='hello') Customized managers =================== To create one's own manager one creates a subclass of `BaseManager`. To create a method of the subclass which will create new shared objects one uses the following function: `CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)` Returns a function with signature `func(self, *args, **kwds)` which will create a shared object using the manager `self` and return a proxy for it. The shared objects will be created by evaluating `callable(*args, **kwds)` in the manager process. The arguments are: `callable` The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored. `proxytype` The type of proxy which will be used for object returned by `callable`. If `proxytype` is `None` then each time an object is returned by `callable` either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the `exposed` argument, see below. `exposed` Given a shared object returned by `callable`, the `exposed` argument is the list of those method names which should be exposed via |callmethod|_. [#]_ [#]_ If `exposed` is `None` and `callable.__exposed__` exists then `callable.__exposed__` is used instead. If `exposed` is `None` and `callable.__exposed__` does not exist then all methods of the shared object which do not start with `'_'` will be exposed. An attempt to use |callmethod| with a method name which is not exposed will raise an exception. `typeid` If `typeid` is a string then it is used as an identifier for the callable. Otherwise, `typeid` must be `None` and a string prefixed by `callable.__name__` is used as the identifier. .. |callmethod| replace:: ``BaseProxy._callMethod()`` .. _callmethod: proxy-objects.html#methods-of-baseproxy .. [#] A method here means any attribute which has a `__call__` attribute. .. [#] The method names `__repr__`, `__str__`, and `__cmp__` of a shared object are always exposed by the manager. However, instead of invoking the `__repr__()`, `__str__()`, `__cmp__()` instance methods (none of which are guaranteed to exist) they invoke the builtin functions `repr()`, `str()` and `cmp()`. Note that one should generally avoid exposing rich comparison methods like `__eq__()`, `__ne__()`, `__le__()`. To make the proxy type support comparison by value one can just expose `__cmp__()` instead (even if the referent does not have such a method). 
Example ------- :: from processing.managers import BaseManager, CreatorMethod class FooClass(object): def bar(self): print 'BAR' def baz(self): print 'BAZ' class NewManager(BaseManager): Foo = CreatorMethod(FooClass) if __name__ == '__main__': manager = NewManager() manager.start() foo = manager.Foo() foo.bar() # prints 'BAR' foo.baz() # prints 'BAZ' manager.shutdown() See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples. Using a remote manager ====================== It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a shared queue which remote clients can use:: >>> from processing.managers import BaseManager, CreatorMethod >>> import Queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy') ... >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none') >>> m.serveForever() One client can access the server as follows:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.put('hello') Another client can also use it:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.get() 'hello' .. _Prev: connection-objects.html .. _Up: processing-ref.html .. _Next: proxy-objects.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/pool-objects.html000066400000000000000000000265511455552142400246670ustar00rootroot00000000000000 Process Pools
Prev         Up         Next

Process Pools

The processing.pool module has one public class:

class Pool(processes=None, initializer=None, initargs=())

A class representing a pool of worker processes.

Tasks can be offloaded to the pool and the results dealt with when they become available.

Note that tasks can only be submitted (or retrieved) by the process which created the pool object.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

Pool objects

Pool has the following public methods:

__init__(processes=None)
The constructor creates and starts processes worker processes. If processes is None then cpuCount() is used to find a default, or 1 if cpuCount() raises NotImplementedError.
apply(func, args=(), kwds={})
Equivalent of the apply() builtin function. It blocks till the result is ready.
applyAsync(func, args=(), kwds={}, callback=None)

A variant of the apply() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

map(func, iterable, chunksize=None)

A parallel equivalent of the map() builtin function. It blocks till the result is ready.

This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.

mapAsync(func, iterable, chunksize=None, callback=None)

A variant of the map() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

imap(func, iterable, chunksize=1)

An equivalent of itertools.imap().

The chunksize argument is the same as the one used by the map() method. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1.

Also if chunksize is 1 then the next() method of the iterator returned by the imap() method has an optional timeout parameter: next(timeout) will raise processing.TimeoutError if the result cannot be returned within timeout seconds.

imapUnordered(func, iterable, chunksize=1)
The same as imap() except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".)
close()
Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
terminate()
Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected terminate() will be called immediately.
join()
Wait for the worker processes to exit. One must call close() or terminate() before using join().

Asynchronous result objects

The result objects returned by applyAsync() and mapAsync() have the following public methods:

get(timeout=None)
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then processing.TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
wait(timeout=None)
Waits until the result is available or until timeout seconds pass.
ready()
Returns whether the call has completed.
successful()
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.

Examples

The following example demonstrates the use of a pool:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes

    result = pool.applyAsync(f, (10,))    # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow

    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

    it = pool.imap(f, range(10))
    print it.next()                       # prints "0"
    print it.next()                       # prints "1"
    print it.next(timeout=1)              # prints "4" unless your computer is *very* slow

    import time
    result = pool.applyAsync(time.sleep, (10,))
    print result.get(timeout=1)           # raises `TimeoutError`

See also ex_pool.py.
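
As a further sketch, the callback argument of applyAsync() can be used to collect results in the parent process (the helper names here are illustrative):

from processing import Pool

results = []

def collect(x):
    results.append(x)            # runs in the parent's result-handling thread

def square(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=2)
    for i in range(5):
        pool.applyAsync(square, (i,), callback=collect)
    pool.close()
    pool.join()
    print sorted(results)        # prints "[0, 1, 4, 9, 16]"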

uqfoundation-multiprocess-b3457a5/pypy3.10/doc/pool-objects.txt000066400000000000000000000136411455552142400245360ustar00rootroot00000000000000.. include:: header.txt =============== Process Pools =============== The `processing.pool` module has one public class: **class** `Pool(processes=None, initializer=None, initargs=())` A class representing a pool of worker processes. Tasks can be offloaded to the pool and the results dealt with when they become available. Note that tasks can only be submitted (or retrieved) by the process which created the pool object. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. Pool objects ============ `Pool` has the following public methods: `__init__(processes=None)` The constructor creates and starts `processes` worker processes. If `processes` is `None` then `cpuCount()` is used to find a default or 1 if `cpuCount()` raises `NotImplemented`. `apply(func, args=(), kwds={})` Equivalent of the `apply()` builtin function. It blocks till the result is ready. `applyAsync(func, args=(), kwds={}, callback=None)` A variant of the `apply()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `map(func, iterable, chunksize=None)` A parallel equivalent of the `map()` builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting `chunksize` to a positive integer. `mapAsync(func, iterable, chunksize=None, callback=None)` A variant of the `map()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `imap(func, iterable, chunksize=1)` An equivalent of `itertools.imap()`. The `chunksize` argument is the same as the one used by the `map()` method. For very long iterables using a large value for `chunksize` can make make the job complete **much** faster than using the default value of `1`. Also if `chunksize` is `1` then the `next()` method of the iterator returned by the `imap()` method has an optional `timeout` parameter: `next(timeout)` will raise `processing.TimeoutError` if the result cannot be returned within `timeout` seconds. `imapUnordered(func, iterable, chunksize=1)` The same as `imap()` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) `close()` Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. `terminate()` Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected `terminate()` will be called immediately. 
`join()` Wait for the worker processes to exit. One must call `close()` or `terminate()` before using `join()`. Asynchronous result objects =========================== The result objects returns by `applyAsync()` and `mapAsync()` have the following public methods: `get(timeout=None)` Returns the result when it arrives. If `timeout` is not `None` and the result does not arrive within `timeout` seconds then `processing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by `get()`. `wait(timeout=None)` Waits until the result is available or until `timeout` seconds pass. `ready()` Returns whether the call has completed. `successful()` Returns whether the call completed without raising an exception. Will raise `AssertionError` if the result is not ready. Examples ======== The following example demonstrates the use of a pool:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, (10,)) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print it.next() # prints "0" print it.next() # prints "1" print it.next(timeout=1) # prints "4" unless your computer is *very* slow import time result = pool.applyAsync(time.sleep, (10,)) print result.get(timeout=1) # raises `TimeoutError` See also `ex_pool.py <../examples/ex_pool.py>`_. .. _Prev: proxy-objects.html .. _Up: processing-ref.html .. _Next: sharedctypes.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/process-objects.html000066400000000000000000000235741455552142400253760ustar00rootroot00000000000000 Process objects
Prev         Up         Next

Process objects

Process objects represent activity that is run in a separate process.

Process

The Process class has equivalents of all the methods of threading.Thread:

__init__(group=None, target=None, name=None, args=(), kwargs={})

This constructor should always be called with keyword arguments. Arguments are:

group
should be None; exists for compatibility with threading.Thread.
target
is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name
is the process name. By default, a unique name is constructed of the form 'Process-N1:N2:...:Nk' where N1,N2,...,Nk is a sequence of integers whose length is determined by the generation of the process.
args
is the argument tuple for the target invocation. Defaults to ().
kwargs
is a dictionary of keyword arguments for the target invocation. Defaults to {}.

If a subclass overrides the constructor, it must make sure it invokes the base class constructor (Process.__init__()) before doing anything else to the process.

run()

Method representing the process's activity.

You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.

start()

Start the process's activity.

This must be called at most once per process object. It arranges for the object's run() method to be invoked in a separate process.

join(timeout=None)

This blocks the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.

If timeout is None then there is no timeout.

A process can be joined many times.

A process cannot join itself because this would cause a deadlock.

It is an error to attempt to join a process before it has been started.

getName()
Return the process's name.
setName(name)

Set the process's name.

The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor.

isAlive()

Return whether the process is alive.

Roughly, a process object is alive from the moment the start() method returns until the child process terminates.

isDaemon()
Return the process's daemon flag.
setDaemon(daemonic)

Set the process's daemon flag to the Boolean value daemonic. This must be called before start() is called.

The initial value is inherited from the creating process.

When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes.

In addition process objects also support the following methods.

getPid()
Return the process ID. Before the process is spawned this will be None.
getExitCode()
Return the child's exit code. This will be None if the process has not yet terminated. A negative value -N indicates that the child was terminated by signal N.
getAuthKey()

Return the process's authentication key (a string).

When the processing package is initialized the main process is assigned a random hexadecimal string.

When a Process object is created it will inherit the authentication key of its parent process, although this may be changed using setAuthKey() below.

See Authentication Keys.

setAuthKey(authkey)
Set the process's authentication key which must be a string.
terminate()

Terminate the process. On Unix this is done using the SIGTERM signal and on Windows TerminateProcess() is used. Note that exit handlers and finally clauses etc. will not be executed. Also note that descendants of the process will not be terminated.

Warning

If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock.

Note that the start(), join(), isAlive() and getExitCode() methods should only be called by the process that created the process object.

Example

Example usage of some of the methods of Process:

>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
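
A minimal sketch of subclassing Process and overriding run() (the class name and message are illustrative):

from processing import Process

class Greeter(Process):
    def __init__(self, who):
        Process.__init__(self)    # invoke the base constructor before anything else
        self.who = who
    def run(self):
        print 'hello', self.who

if __name__ == '__main__':
    p = Greeter('bob')
    p.start()
    p.join()
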
uqfoundation-multiprocess-b3457a5/pypy3.10/doc/process-objects.txt000066400000000000000000000136131455552142400252420ustar00rootroot00000000000000.. include:: header.txt ================= Process objects ================= Process objects represent activity that is run in a separate process. Process ======= The `Process` class has equivalents of all the methods of `threading.Thread`: `__init__(group=None, target=None, name=None, args=(), kwargs={})` This constructor should always be called with keyword arguments. Arguments are: `group` should be `None`; exists for compatibility with `threading.Thread`. `target` is the callable object to be invoked by the `run()` method. Defaults to None, meaning nothing is called. `name` is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. `args` is the argument tuple for the target invocation. Defaults to `()`. `kwargs` is a dictionary of keyword arguments for the target invocation. Defaults to `{}`. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (`Process.__init__()`) before doing anything else to the process. `run()` Method representing the process's activity. You may override this method in a subclass. The standard `run()` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the `args` and `kwargs` arguments, respectively. `start()` Start the process's activity. This must be called at most once per process object. It arranges for the object's `run()` method to be invoked in a separate process. `join(timeout=None)` This blocks the calling thread until the process whose `join()` method is called terminates or until the optional timeout occurs. If `timeout` is `None` then there is no timeout. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. `getName()` Return the process's name. `setName(name)` Set the process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. `isAlive()` Return whether the process is alive. Roughly, a process object is alive from the moment the `start()` method returns until the child process terminates. `isDaemon()` Return the process's daemon flag. `setDaemon(daemonic)` Set the process's daemon flag to the Boolean value `daemonic`. This must be called before `start()` is called. The initial value is inherited from the creating process. When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes. In addition process objects also support the following methods. `getPid()` Return the process ID. Before the process is spawned this will be `None`. `getExitCode()` Return the child's exit code. This will be `None` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. `getAuthKey()` Return the process's authentication key (a string). When the `processing` package is initialized the main process is assigned a random hexadecimal string. 
When a `Process` object is created it will inherit the authentication key of its parent process, although this may be changed using `setAuthKey()` below. See `Authentication Keys `_. `setAuthKey(authkey)` Set the process's authentication key which must be a string. `terminate()` Terminate the process. On Unix this is done using the `SIGTERM` signal and on Windows `TerminateProcess()` is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will *not* be terminates. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the `start()`, `join()`, `isAlive()` and `getExitCode()` methods should only be called by the process that created the process object. Example ======= Example usage of some of the methods of `Process`:: >>> import processing, time, signal >>> p = processing.Process(target=time.sleep, args=(1000,)) >>> print p, p.isAlive() False >>> p.start() >>> print p, p.isAlive() True >>> p.terminate() >>> print p, p.isAlive() False >>> p.getExitCode() == -signal.SIGTERM True .. _Prev: processing-ref.html .. _Up: processing-ref.html .. _Next: queue-objects.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/processing-ref.html000066400000000000000000000573611455552142400252200ustar00rootroot00000000000000 processing package reference
Prev         Up         Next

processing package reference

The processing package mostly replicates the API of the threading module.

Classes and exceptions

class Process(group=None, target=None, name=None, args=(), kwargs={})

An analogue of threading.Thread.

See Process objects.

exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Pipes and Queues

When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks.

For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers).

Note that one can also create a shared queue by using a manager object -- see Managers.

For an example of the usage of queues for interprocess communication see ex_workers.py.

Pipe(duplex=True)

Returns a pair (conn1, conn2) of connection objects representing the ends of a pipe.

If duplex is true then the pipe is two way; otherwise conn1 can only be used for receiving messages and conn2 can only be used for sending messages.

See Connection objects.

Queue(maxsize=0)

Returns a process shared queue object. The usual Empty and Full exceptions from the standard library's Queue module are raised to signal timeouts.

See Queue objects.
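
As a small sketch of the duplex argument to Pipe() described above (the function and message names are illustrative):

from processing import Process, Pipe

def sender(conn):
    conn.send('ping')
    conn.close()

if __name__ == '__main__':
    recv_conn, send_conn = Pipe(duplex=False)   # one-way: recv_conn receives, send_conn sends
    p = Process(target=sender, args=(send_conn,))
    p.start()
    print recv_conn.recv()                      # prints "ping"
    p.join()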

Synchronization primitives

Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the standard library's threading module.

Note that one can also create synchronization primitives by using a manager object -- see Managers.

BoundedSemaphore(value=1)

Returns a bounded semaphore object: a clone of threading.BoundedSemaphore.

(On Mac OS X this is indistinguishable from Semaphore() because sem_getvalue() is not implemented on that platform).

Condition(lock=None)

Returns a condition variable: a clone of threading.Condition.

If lock is specified then it should be a Lock or RLock object from processing.

Event()
Returns an event object: a clone of threading.Event.
Lock()
Returns a non-recursive lock object: a clone of threading.Lock.
RLock()
Returns a recursive lock object: a clone of threading.RLock.
Semaphore(value=1)
Returns a semaphore object: a clone of threading.Semaphore.

Acquiring with a timeout

The acquire() method of BoundedSemaphore, Lock, RLock and Semaphore has a timeout parameter not supported by the equivalents in threading. The signature is acquire(block=True, timeout=None) with keyword parameters being acceptable. If block is true and timeout is not None then it specifies a timeout in seconds. If block is false then timeout is ignored.
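
A minimal sketch of acquiring with a timeout (this assumes acquire() returns False if the timeout expires, as in later versions of this API):

from processing import Lock

lock = Lock()
if lock.acquire(timeout=2):          # block for at most 2 seconds
    try:
        print 'acquired the lock'
    finally:
        lock.release()
else:
    print 'timed out waiting for the lock'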

Interrupting the main thread

If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to BoundedSemaphore.acquire(), Lock.acquire(), RLock.acquire(), Semaphore.acquire(), Condition.acquire() or Condition.wait() then the call will be immediately interrupted and KeyboardInterrupt will be raised.

This differs from the behaviour of threading where SIGINT will be ignored while the equivalent blocking calls are in progress.

Shared Objects

It is possible to create shared objects using shared memory which can be inherited by child processes.

Value(typecode_or_type, *args, **, lock=True)

Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Array(typecode_or_type, size_or_initializer, **, lock=True)

Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library.

See also sharedctypes.
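
A small illustrative sketch of the lock keyword (the values are arbitrary; the unsynchronized array is only safe here because a single child process writes to it):

from processing import Process, Value, Array

def f(n, a):
    n.value = 42                              # n is wrapped with its own lock by default
    for i in range(len(a)):
        a[i] = a[i] * 2

if __name__ == '__main__':
    n = Value('i', 0)                         # synchronized wrapper (lock=True)
    a = Array('d', [0.25, 0.5], lock=False)   # raw, unsynchronized shared array
    p = Process(target=f, args=(n, a))
    p.start()
    p.join()
    print n.value, a[:]                       # prints "42 [0.5, 1.0]"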

Managers

Managers provide a way to create data which can be shared between different processes.

Manager()

Returns a started SyncManager object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies.

The methods for creating shared objects are

list(), dict(), Namespace(), Value(), Array(), Lock(), RLock(), Semaphore(), BoundedSemaphore(), Condition(), Event(), Queue().

See SyncManager.

It is possible to create managers which support other types -- see Customized managers.

Process Pools

One can create a pool of processes which will carry out tasks submitted to it.

Pool(processes=None, initializer=None, initargs=())

Returns a process pool object which controls a pool of worker processes to which jobs can be submitted.

It supports asynchronous results with timeouts and callbacks and has a parallel map implementation.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

See Pool objects.

Logging

Some support for logging is available. Note, however, that the logging package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up.

enableLogging(level, HandlerType=None, handlerArgs=(), format=None)

Enables logging and sets the debug level used by the package's logger to level. See documentation for the logging module in the standard library.

If HandlerType is specified then a handler is created using HandlerType(*handlerArgs) and this will be used by the logger -- any previous handlers will be discarded. If format is specified then this will be used for the handler; otherwise format defaults to '[%(levelname)s/%(processName)s] %(message)s'. (The logger used by processing allows use of the non-standard '%(processName)s' format.)

If HandlerType is not specified and the logger has no handlers then a default one is created which prints to sys.stderr.

Note: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call enableLogging() with the same arguments which were used when its parent process last called enableLogging() (if it ever did).

getLogger()
Returns the logger used by processing. If enableLogging() has not yet been called then None is returned.

Below is an example session with logging turned on:

>>> import processing, logging
>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] manager exiting with exitcode 0

Miscellaneous

activeChildren()

Return list of all live children of the current process.

Calling this has the side effect of "joining" any processes which have already finished.

cpuCount()
Returns the number of CPUs in the system. May raise NotImplementedError.
currentProcess()

An analogue of threading.current_thread().

Returns the object corresponding to the current process.

freezeSupport()

Adds support for when a program which uses the processing package has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example

from processing import Process, freezeSupport

def f():
    print 'hello world!'

if __name__ == '__main__':
    freezeSupport()
    Process(target=f).start()

If the freezeSupport() line is omitted then trying to run the frozen executable will raise RuntimeError.

If the module is being run normally by the python interpreter then freezeSupport() has no effect.

Note

  • The processing.dummy package replicates the API of processing but is no more than a wrapper around the threading module.
  • processing contains no analogues of activeCount, enumerate, settrace, setprofile, Timer, or local from the threading module.
uqfoundation-multiprocess-b3457a5/pypy3.10/doc/processing-ref.txt000066400000000000000000000310141455552142400250560ustar00rootroot00000000000000.. include:: header.txt ============================== processing package reference ============================== The `processing` package mostly replicates the API of the `threading` module. Classes and exceptions ---------------------- **class** `Process(group=None, target=None, name=None, args=(), kwargs={})` An analogue of `threading.Thread`. See `Process objects`_. **exception** `BufferTooShort` Exception raised by the `recvBytesInto()` method of a `connection object `_ when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Pipes and Queues ---------------- When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers). Note that one can also create a shared queue by using a manager object -- see `Managers`_. For an example of the usage of queues for interprocess communication see `ex_workers.py <../examples/ex_workers.py>`_. `Pipe(duplex=True)` Returns a pair `(conn1, conn2)` of connection objects representing the ends of a pipe. If `duplex` is true then the pipe is two way; otherwise `conn1` can only be used for receiving messages and `conn2` can only be used for sending messages. See `Connection objects `_. `Queue(maxsize=0)` Returns a process shared queue object. The usual `Empty` and `Full` exceptions from the standard library's `Queue` module are raised to signal timeouts. See `Queue objects `_. Synchronization primitives -------------------------- Generally synchronization primitives are not as necessary in a multiprocess program as they are in a mulithreaded program. See the documentation for the standard library's `threading` module. Note that one can also create synchronization primitves by using a manager object -- see `Managers`_. `BoundedSemaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.BoundedSemaphore`. (On Mac OSX this is indistiguishable from `Semaphore()` because `sem_getvalue()` is not implemented on that platform). `Condition(lock=None)` Returns a condition variable: a clone of `threading.Condition`. If `lock` is specified then it should be a `Lock` or `RLock` object from `processing`. `Event()` Returns an event object: a clone of `threading.Event`. `Lock()` Returns a non-recursive lock object: a clone of `threading.Lock`. `RLock()` Returns a recursive lock object: a clone of `threading.RLock`. `Semaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.Semaphore`. .. admonition:: Acquiring with a timeout The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and `Semaphore` has a timeout parameter not supported by the equivalents in `threading`. The signature is `acquire(block=True, timeout=None)` with keyword parameters being acceptable. If `block` is true and `timeout` is not `None` then it specifies a timeout in seconds. If `block` is false then `timeout` is ignored. .. 
admonition:: Interrupting the main thread If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to `BoundedSemaphore.acquire()`, `Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()`, `Condition.acquire()` or `Condition.wait()` then the call will be immediately interrupted and `KeyboardInterrupt` will be raised. This differs from the behaviour of `threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared Objects -------------- It is possible to create shared objects using shared memory which can be inherited by child processes. `Value(typecode_or_type, *args, **, lock=True)` Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Array(typecode_or_type, size_or_initializer, **, lock=True)` Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library. See also `sharedctypes `_. Managers -------- Managers provide a way to create data which can be shared between different processes. `Manager()` Returns a started `SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. The methods for creating shared objects are `list()`, `dict()`, `Namespace()`, `Value()`, `Array()`, `Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`. See `SyncManager `_. It is possible to create managers which support other types -- see `Customized managers `_. Process Pools ------------- One can create a pool of processes which will carry out tasks submitted to it. 
`Pool(processes=None, initializer=None, initargs=())` Returns a process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. See `Pool objects `_. Logging ------- Some support for logging is available. Note, however, that the `logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. `enableLogging(level, HandlerType=None, handlerArgs=(), format=None)` Enables logging and sets the debug level used by the package's logger to `level`. See documentation for the `logging` module in the standard library. If `HandlerType` is specified then a handler is created using `HandlerType(*handlerArgs)` and this will be used by the logger -- any previous handlers will be discarded. If `format` is specified then this will be used for the handler; otherwise `format` defaults to `'[%(levelname)s/%(processName)s] %(message)s'`. (The logger used by `processing` allows use of the non-standard `'%(processName)s'` format.) If `HandlerType` is not specified and the logger has no handlers then a default one is created which prints to `sys.stderr`. *Note*: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call `enableLogging()` with the same arguments which were used when its parent process last called `enableLogging()` (if it ever did). `getLogger()` Returns the logger used by `processing`. If `enableLogging()` has not yet been called then `None` is returned. Below is an example session with logging turned on:: >>> import processing, logging >>> processing.enableLogging(level=logging.INFO) >>> processing.getLogger().warning('doomed') [WARNING/MainProcess] doomed >>> m = processing.Manager() [INFO/SyncManager-1] child process calling self.run() [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-1] manager received shutdown message [INFO/SyncManager-1] manager exiting with exitcode 0 Miscellaneous ------------- `activeChildren()` Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. `cpuCount()` Returns the number of CPUs in the system. May raise `NotImplementedError`. `currentProcess()` An analogue of `threading.current_thread()`. Returns the object corresponding to the current process. `freezeSupport()` Adds support for when a program which uses the `processing` package has been frozen to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) One needs to call this function straight after the `if __name__ == '__main__'` line of the main module. For example :: from processing import Process, freezeSupport def f(): print 'hello world!' if __name__ == '__main__': freezeSupport() Process(target=f).start() If the `freezeSupport()` line is missed out then trying to run the frozen executable will raise `RuntimeError`. If the module is being run normally by the python interpreter then `freezeSupport()` has no effect. .. 
note:: * The `processing.dummy` package replicates the API of `processing` but is no more than a wrapper around the `threading` module. * `processing` contains no analogues of `activeCount`, `enumerate`, `settrace`, `setprofile`, `Timer`, or `local` from the `threading` module. Subsections ----------- + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes object `_ + `Listeners and Clients `_ .. _Prev: intro.html .. _Up: index.html .. _Next: process-objects.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/programming-guidelines.html000066400000000000000000000214551455552142400267350ustar00rootroot00000000000000 Programming guidelines

Programming guidelines

There are certain guidelines and idioms which should be adhered to when using the processing package.

All platforms

Avoid shared state

As far as possible one should try to avoid shifting large amounts of data between processes.

It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the threading module.

Picklability:
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies:

Do not use a proxy object from more than one thread unless you protect it with a lock.

(There is never a problem with different processes using the 'same' proxy.)
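As a concrete illustration of this guideline (a minimal sketch with illustrative names, not taken from the package's examples), two threads can safely share one proxy by holding a common lock around every call on it:

import threading
from processing import Manager

def append_items(shared, lock, n):
    for i in range(n):
        lock.acquire()
        shared.append(i)        # proxy method invoked only while the lock is held
        lock.release()

if __name__ == '__main__':
    manager = Manager()
    shared = manager.list()
    lock = threading.Lock()     # one lock shared by every thread that uses the proxy
    threads = [threading.Thread(target=append_items, args=(shared, lock, 5))
               for i in range(2)]
    for t in threads: t.start()
    for t in threads: t.join()
    print len(shared)           # 10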

Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or activeChildren() is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's isAlive() will join the process. Even so it is probably good practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many types from the processing package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes

Using the terminate() method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes.

Therefore it is probably best to only consider using terminate() on processes which never use any shared resources.

Joining processes that use queues

Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the cancelJoin() method of the queue to avoid this behaviour.)

This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will automatically be joined.

An example which will deadlock is the following:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # this deadlocks
    obj = queue.get()

A fix here would be to swap the last two lines round (or simply remove the p.join() line).
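For reference, a corrected version of the same example — the only change from the deadlocking version is that the item is taken off the queue before the child process is joined:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    obj = queue.get()           # drain the queue first
    p.join()                    # now the join cannot deadlock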

Explicitly pass resources to child processes

On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process.

Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process.

So for instance

from processing import Process, Lock

def f():
    ... do something using "lock" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f).start()

should be rewritten as

from processing import Process, Lock

def f(l):
    ... do something using "l" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f, args=(lock,)).start()

Windows

Since Windows lacks os.fork() it has a few extra restrictions:

More picklability:

Ensure that all arguments to Process.__init__() are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the target argument on Windows --- just define a function and use that instead.

Also, if you subclass Process then make sure that instances will be picklable when the start() method is called.
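For example, a Process subclass along these lines (a minimal sketch; the class name and attribute are illustrative) keeps its state in plain picklable attributes set in __init__(), so the instance can be pickled when start() is called:

from processing import Process

class MyProcess(Process):
    def __init__(self, value):
        Process.__init__(self)
        self.value = value          # plain picklable attribute
    def run(self):
        print 'value =', self.value

if __name__ == '__main__':
    p = MyProcess(42)
    p.start()
    p.join()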

Global variables:

Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that start() was called.

However, global variables which are just module level constants cause no problems.

Safe importing of main module:

Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).

For example, under Windows running the following module would fail with a RuntimeError:

from processing import Process

def foo():
    print 'hello'

p = Process(target=foo)
p.start()

Instead one should protect the "entry point" of the program by using if __name__ == '__main__': as follows:

from processing import Process

def foo():
    print 'hello'

if __name__ == '__main__':
    freezeSupport()
    p = Process(target=foo)
    p.start()

(The freezeSupport() line can be omitted if the program will be run normally instead of frozen.)

This allows the newly spawned Python interpreter to safely import the module and then run the module's foo() function.

Similar restrictions apply if a pool or manager is created in the main module.
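For instance, a pool should likewise only be created under the __main__ guard — a minimal sketch in the same style as the example above:

from processing import Pool, freezeSupport

def square(x):
    return x * x

if __name__ == '__main__':
    freezeSupport()
    pool = Pool(processes=2)
    print pool.map(square, range(5))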

uqfoundation-multiprocess-b3457a5/pypy3.10/doc/programming-guidelines.txt000066400000000000000000000150221455552142400266010ustar00rootroot00000000000000.. include:: header.txt ======================== Programming guidelines ======================== There are certain guidelines and idioms which should be adhered to when using the `processing` package. All platforms ------------- *Avoid shared state* As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the `threading` module. *Picklability*: Ensure that the arguments to the methods of proxies are picklable. *Thread safety of proxies*: Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the 'same' proxy.) *Joining zombie processes* On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or `activeChildren()` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's `isAlive()` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. *Better to inherit than pickle/unpickle* On Windows many of types from the `processing` package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which need access to a shared resource created elsewhere can inherit it from an ancestor process. *Avoid terminating processes* Using the `terminate()` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using `terminate()` on processes which never use any shared resources. *Joining processes that use queues* Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the `cancelJoin()` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from processing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the `p.join()` line). *Explicity pass resources to child processes* On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. 
Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from processing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from processing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Windows ------- Since Windows lacks `os.fork()` it has a few extra restrictions: *More picklability*: Ensure that all arguments to `Process.__init__()` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the `target` argument on Windows --- just define a function and use that instead. Also, if you subclass `Process` then make sure that instances will be picklable when the `start()` method is called. *Global variables*: Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that `start()` was called. However, global variables which are just module level constants cause no problems. *Safe importing of main module*: Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a `RuntimeError`:: from processing import Process def foo(): print 'hello' p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using `if __name__ == '__main__':` as follows:: from processing import Process def foo(): print 'hello' if __name__ == '__main__': freezeSupport() p = Process(target=foo) p.start() (The `freezeSupport()` line can be ommitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's `foo()` function. Similar restrictions apply if a pool or manager is created in the main module. .. _Prev: connection-ref.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/proxy-objects.html000066400000000000000000000175771455552142400251070ustar00rootroot00000000000000 Proxy objects

Proxy objects

A proxy is an object which refers to a shared object which lives (presumably) in a different process. The shared object is said to be the referent of the proxy. Multiple proxy objects may have the same referent.

A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can:

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<Proxy[list] object at 0x00DFA230>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
>>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
True

Notice that applying str() to a proxy will return the representation of the referent, whereas applying repr() will return the representation of the proxy.

An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:

>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b)         # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']

Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the for statement:

>>> a = manager.dict([(i*i, i) for i in range(10)])
>>> for key in a:
...     print '<%r,%r>' % (key, a[key]),
...
<0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6>

Note

Although list and dict proxy objects are iterable, it will be much more efficient to iterate over a copy of the referent, for example

for item in some_list[:]:
    ...

and

for key in some_dict.keys():
    ...

Methods of BaseProxy

Proxy objects are instances of subclasses of BaseProxy. The only semi-public methods of BaseProxy are the following:

_callMethod(methodname, args=(), kwds={})

Call and return the result of a method of the proxy's referent.

If proxy is a proxy whose referent is obj then the expression

proxy._callMethod(methodname, args, kwds)

will evaluate the expression

getattr(obj, methodname)(*args, **kwds)         (*)

in the manager's process.

The returned value will be either a copy of the result of (*) or, if the result is an unpicklable iterator, a proxy for the iterator.

If an exception is raised by (*) then it is re-raised by _callMethod(). If some other exception is raised in the manager's process then this is converted into a RemoteError exception and is raised by _callMethod().

Note in particular that an exception will be raised if methodname has not been exposed --- see the exposed argument to CreatorMethod.

_getValue()

Return a copy of the referent.

If the referent is unpicklable then this will raise an exception.

__repr__
Return a representation of the proxy object.
__str__
Return the representation of the referent.

Cleanup

A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent.

A shared object gets deleted from the manager process when there are no longer any proxies referring to it.

Examples

An example of the usage of _callMethod():

>>> l = manager.list(range(10))
>>> l._callMethod('__getslice__', (2, 7))   # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._callMethod('__iter__')               # equiv to `iter(l)`
<Proxy[iter] object at 0x00DFAFF0>
>>> l._callMethod('__getitem__', (20,))     # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range
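Similarly, _getValue() just returns a plain copy of the referent (illustrative continuation of the session above):

>>> l._getValue()                           # a copy of the shared list
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]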

As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:

class IteratorProxy(BaseProxy):
    def __iter__(self):
        return self
    def next(self):
        return self._callMethod('next')
uqfoundation-multiprocess-b3457a5/pypy3.10/doc/proxy-objects.txt000066400000000000000000000115571455552142400247520ustar00rootroot00000000000000.. include:: header.txt =============== Proxy objects =============== A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that the its referent can:: >>> from processing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print l [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print repr(l) >>> l[4] 16 >>> l[2:5] [4, 9, 16] >>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] True Notice that applying `str()` to a proxy will return the representation of the referent, whereas applying `repr()` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of `a` now contains referent of `b` >>> print a, b [[]] [] >>> b.append('hello') >>> print a, b [['hello']] ['hello'] Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the `for` statement:: >>> a = manager.dict([(i*i, i) for i in range(10)]) >>> for key in a: ... print '<%r,%r>' % (key, a[key]), ... <0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6> .. note:: Although `list` and `dict` proxy objects are iterable, it will be much more efficient to iterate over a *copy* of the referent, for example :: for item in some_list[:]: ... and :: for key in some_dict.keys(): ... Methods of `BaseProxy` ====================== Proxy objects are instances of subclasses of `BaseProxy`. The only semi-public methods of `BaseProxy` are the following: `_callMethod(methodname, args=(), kwds={})` Call and return the result of a method of the proxy's referent. If `proxy` is a proxy whose referent is `obj` then the expression `proxy._callMethod(methodname, args, kwds)` will evaluate the expression `getattr(obj, methodname)(*args, **kwds)` |spaces| _`(*)` in the manager's process. The returned value will be either a copy of the result of `(*)`_ or if the result is an unpicklable iterator then a proxy for the iterator. If an exception is raised by `(*)`_ then then is re-raised by `_callMethod()`. If some other exception is raised in the manager's process then this is converted into a `RemoteError` exception and is raised by `_callMethod()`. Note in particular that an exception will be raised if `methodname` has not been *exposed* --- see the `exposed` argument to `CreatorMethod `_. `_getValue()` Return a copy of the referent. If the referent is unpicklable then this will raise an exception. `__repr__` Return a representation of the proxy object. `__str__` Return the representation of the referent. Cleanup ======= A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. 
A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Examples ======== An example of the usage of `_callMethod()`:: >>> l = manager.list(range(10)) >>> l._callMethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callMethod('__iter__') # equiv to `iter(l)` >>> l._callMethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:: class IteratorProxy(BaseProxy): def __iter__(self): return self def next(self): return self._callMethod('next') .. _Prev: manager-objects.html .. _Up: processing-ref.html .. _Next: pool-objects.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/queue-objects.html000066400000000000000000000227101455552142400250330ustar00rootroot00000000000000 Queue objects

Queue objects

The queue type provided by processing is a multi-producer, multi-consumer FIFO queue modelled on the Queue.Queue class in the standard library.

Queue(maxsize=0)

Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe.

processing.Queue implements all the methods of Queue.Queue except for qsize(), task_done() and join().

empty()
Return True if the queue is empty, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
full()
Return True if the queue is full, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
put(item, block=True, timeout=None)
Put item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).
put_nowait(item), putNoWait(item)
Equivalent to put(item, False).
get(block=True, timeout=None)
Remove and return an item from the queue. If optional args block is true and timeout is None (the default), block if necessary until an item is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the Empty exception (timeout is ignored in that case).
get_nowait(), getNoWait()
Equivalent to get(False).
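The blocking variants above are typically combined with the timeout exceptions described further down; a minimal sketch (the worker function is illustrative):

from processing import Process, Queue
from Queue import Empty             # timeout exceptions live in the standard Queue module

def worker(q):
    q.put('result')

if __name__ == '__main__':
    q = Queue()
    Process(target=worker, args=(q,)).start()
    try:
        print q.get(timeout=1.0)    # blocks for at most one second
    except Empty:
        print 'no result arrived within the timeout'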

processing.Queue has a few additional methods not found in Queue.Queue which are usually unnecessary:

putMany(iterable)
If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So q.putMany(X) is a faster alternative to for x in X: q.put(x). Raises an error if the queue has finite size.
close()
Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected.
joinThread()

This joins the background thread and can only be used after close() has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe.

By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call cancelJoin() to prevent this behaviour.

cancelJoin()
Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue.

Empty and Full

processing uses the usual Queue.Empty and Queue.Full exceptions to signal a timeout. They are not available in the processing namespace so you need to import them from Queue.

Warning

If a process is killed using the terminate() method or os.kill() while it is trying to use a Queue then the data in the queue is likely to become corrupted. This may cause other processes to get an exception when they try to use the queue later on.

Warning

As mentioned above, if a child process has put items on a queue (and it has not used cancelJoin()) then that process will not terminate until all buffered items have been flushed to the pipe.

This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children.

Note that a queue created using a manager does not have this issue. See Programming Guidelines.

uqfoundation-multiprocess-b3457a5/pypy3.10/doc/queue-objects.txt000066400000000000000000000121211455552142400247010ustar00rootroot00000000000000.. include:: header.txt =============== Queue objects =============== The queue type provided by `processing` is a multi-producer, multi-consumer FIFO queue modelled on the `Queue.Queue` class in the standard library. `Queue(maxsize=0)` Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. `Queue.Queue` implements all the methods of `Queue.Queue` except for `qsize()`, `task_done()` and `join()`. `empty()` Return `True` if the queue is empty, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `full()` Return `True` if the queue is full, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `put(item, block=True, timeout=None)` Put item into the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Full` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the `Full` exception (`timeout` is ignored in that case). `put_nowait(item)`, `putNoWait(item)` Equivalent to `put(item, False)`. `get(block=True, timeout=None)` Remove and return an item from the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Empty` exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the `Empty` exception (`timeout` is ignored in that case). `get_nowait()`, `getNoWait()` Equivalent to `get(False)`. `processing.Queue` has a few additional methods not found in `Queue.Queue` which are usually unnecessary: `putMany(iterable)` If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So `q.putMany(X)` is a faster alternative to `for x in X: q.put(x)`. Raises an error if the queue has finite size. `close()` Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. `joinThread()` This joins the background thread and can only be used after `close()` has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call `cancelJoin()` to prevent this behaviour. `cancelJoin()` Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue. .. admonition:: `Empty` and `Full` `processing` uses the usual `Queue.Empty` and `Queue.Full` exceptions to signal a timeout. They are not available in the `processing` namespace so you need to import them from `Queue`. .. 
warning:: If a process is killed using the `terminate()` method or `os.kill()` while it is trying to use a `Queue` then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used `cancelJoin()`) then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all it non-daemonic children. Note that a queue created using a manager does not have this issue. See `Programming Guidelines `_. .. _Prev: process-objects.html .. _Up: processing-ref.html .. _Next: connection-objects.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/sharedctypes.html000066400000000000000000000241571455552142400247650ustar00rootroot00000000000000 Shared ctypes objects

Shared ctypes objects

The processing.sharedctypes module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the ctypes package.)

The functions in the module are

RawArray(typecode_or_type, size_or_initializer)

Returns a ctypes array allocated from shared memory.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock.

RawValue(typecode_or_type, *args)

Returns a ctypes object allocated from shared memory.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see documentation for ctypes.

Array(typecode_or_type, size_or_initializer, **, lock=True)

The same as RawArray() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Value(typecode_or_type, *args, **, lock=True)

The same as RawValue() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes object.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

copy(obj)
Returns a ctypes object allocated from shared memory which is a copy of the ctypes object obj.
synchronized(obj, lock=None)

Returns a process-safe wrapper object for a ctypes object which uses lock to synchronize access. If lock is None then a processing.RLock object is created automatically.

A synchronized wrapper will have two methods in addition to those of the object it wraps: getobj() returns the wrapped object and getlock() returns the lock object used for synchronization.

Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object.
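A minimal sketch of using synchronized() (variable names are illustrative): wrap a raw object, then use getlock() and getobj() for an explicit compound update:

from processing.sharedctypes import RawValue, synchronized
from ctypes import c_int

if __name__ == '__main__':
    raw = RawValue(c_int, 0)
    counter = synchronized(raw)     # a processing.RLock is created automatically
    lock = counter.getlock()
    lock.acquire()
    counter.getobj().value += 1     # read-modify-write performed under the lock
    lock.release()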

Equivalences

The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table MyStruct is some subclass of ctypes.Structure.)

ctypes sharedctypes using type sharedctypes using typecode
c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4)
MyStruct(4, 6) RawValue(MyStruct, 4, 6)  
(c_short * 7)() RawArray(c_short, 7) RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))

Example

Below is an example where a number of ctypes objects are modified by a child process

from processing import Process, Lock
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', 'hello world', lock=lock)
    A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print n.value
    print x.value
    print s.value
    print [(p.x, p.y) for p in A]

The results printed are

49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]

Avoid sharing pointers

Although it is possible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash.

uqfoundation-multiprocess-b3457a5/pypy3.10/doc/sharedctypes.txt000066400000000000000000000143071455552142400246340ustar00rootroot00000000000000.. include:: header.txt ======================== Shared ctypes objects ======================== The `processing.sharedctypes` module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the `ctypes` package.) The functions in the module are `RawArray(typecode_or_type, size_or_initializer)` Returns a ctypes array allocated from shared memory. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock. `RawValue(typecode_or_type, *args)` Returns a ctypes object allocated from shared memory. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see documentation for `ctypes`. `Array(typecode_or_type, size_or_initializer, **, lock=True)` The same as `RawArray()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Value(typecode_or_type, *args, **, lock=True)` The same as `RawValue()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `copy(obj)` Returns a ctypes object allocated from shared memory which is a copy of the ctypes object `obj`. `synchronized(obj, lock=None)` Returns a process-safe wrapper object for a ctypes object which uses `lock` to synchronize access. If `lock` is `None` then a `processing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: `getobj()` returns the wrapped object and `getlock()` returns the lock object used for synchronization. 
Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. Equivalences ============ The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table `MyStruct` is some subclass of `ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Example ======= Below is an example where a number of ctypes objects are modified by a child process :: from processing import Process, Lock from processing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for p in A: p.x **= 2 p.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(ctypes.c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print n.value print x.value print s.value print [(p.x, p.y) for p in A] The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. admonition:: Avoid sharing pointers Although it is posible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. _Prev: pool-objects.html .. _Up: processing-ref.html .. _Next: connection-ref.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/tests.html000066400000000000000000000060761455552142400234310ustar00rootroot00000000000000 Tests and Examples

Tests and Examples

processing contains a test sub-package which contains unit tests for the package. You can do a test run by doing

python -m processing.tests

on Python 2.5 or

python -c "from processing.tests import main; main()"

on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager.

The example sub-package contains the following modules:

ex_newtype.py
Demonstration of how to create and use customized managers and proxies.
ex_pool.py
Test of the Pool class which represents a process pool.
ex_synchronize.py
Test of synchronization types like locks, conditions and queues.
ex_workers.py
A test showing how to use queues to feed tasks to a collection of worker processes and collect the results.
ex_webserver.py
An example of how a pool of worker processes can each run a SimpleHTTPServer.HttpServer instance while sharing a single listening socket.
benchmarks.py
Some simple benchmarks comparing processing with threading.
uqfoundation-multiprocess-b3457a5/pypy3.10/doc/tests.txt000066400000000000000000000027331455552142400233000ustar00rootroot00000000000000.. include:: header.txt Tests and Examples ================== `processing` contains a `test` sub-package which contains unit tests for the package. You can do a test run by doing :: python -m processing.tests on Python 2.5 or :: python -c "from processing.tests import main; main()" on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager. The `example` sub-package contains the following modules: `ex_newtype.py <../examples/ex_newtype.py>`_ Demonstration of how to create and use customized managers and proxies. `ex_pool.py <../examples/ex_pool.py>`_ Test of the `Pool` class which represents a process pool. `ex_synchronize.py <../examples/ex_synchronize.py>`_ Test of synchronization types like locks, conditions and queues. `ex_workers.py <../examples/ex_workers.py>`_ A test showing how to use queues to feed tasks to a collection of worker process and collect the results. `ex_webserver.py <../examples/ex_webserver.py>`_ An example of how a pool of worker processes can each run a `SimpleHTTPServer.HttpServer` instance while sharing a single listening socket. `benchmarks.py <../examples/benchmarks.py>`_ Some simple benchmarks comparing `processing` with `threading`. .. _Prev: programming-guidelines.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/pypy3.10/doc/version.txt000066400000000000000000000000341455552142400236130ustar00rootroot00000000000000.. |version| replace:: 0.52 uqfoundation-multiprocess-b3457a5/pypy3.10/examples/000077500000000000000000000000001455552142400224415ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/examples/FAILS.txt000066400000000000000000000101261455552142400240400ustar00rootroot00000000000000=== 3.1 --- $ python ex_newtype.py Traceback (most recent call last): File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hashlib.py", line 104, in import _hashlib ImportError: dlopen(/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/lib-dynload/_hashlib.so, 2): Library not loaded: /opt/local/lib/libssl.1.0.0.dylib Referenced from: /opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/lib-dynload/_hashlib.so Reason: image not found During handling of the above exception, another exception occurred: Traceback (most recent call last): File "ex_newtype.py", line 77, in test() File "ex_newtype.py", line 52, in test f1 = manager.Foo1() File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/managers.py", line 669, in temp token, exp = self._create(typeid, *args, **kwds) File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/managers.py", line 567, in _create conn = self._Client(self._address, authkey=self._authkey) File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/connection.py", line 178, in Client answer_challenge(c, authkey) File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/connection.py", line 418, in answer_challenge digest = hmac.new(authkey, message).digest() File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hmac.py", line 140, in new return HMAC(key, msg, digestmod) File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hmac.py", line 46, in __init__ import hashlib File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hashlib.py", line 135, in md5 = 
__get_builtin_constructor('md5') File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hashlib.py", line 62, in __get_builtin_constructor import _md5 ImportError: No module named _md5 $ python ex_pool.py SyntaxError: can not delete variable 'pool' referenced in nested scope === 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8 (with 'fork', 'spawn'+recurse=True) --- $ python ex_pool.py Testing garbage collection: Traceback (most recent call last): File "ex_pool.py", line 295, in test() File "ex_pool.py", line 288, in test assert not worker.is_alive() AssertionError === 3.8 (with 'spawn'+recurse=False) --- $ python ex_pool.py Ordered results using pool.apply_async(): multiprocess.pool.RemoteTraceback: """ Traceback (most recent call last): File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/pool.py", line 125, in worker result = (True, func(*args, **kwds)) File "ex_pool.py", line 16, in calculate result = func(*args) File "ex_pool.py", line 24, in mul time.sleep(0.5*random.random()) NameError: name 'time' is not defined """ The above exception was the direct cause of the following exception: Traceback (most recent call last): File "ex_pool.py", line 295, in test() File "ex_pool.py", line 68, in test print('\t', r.get()) File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/pool.py", line 768, in get raise self._value NameError: name 'time' is not defined $ python ex_synchronize.py 10 Process Process-1: Traceback (most recent call last): File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 313, in _bootstrap self.run() File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 108, in run self._target(*self._args, **self._kwargs) File "ex_synchronize.py", line 17, in value_func random.seed() NameError: name 'random' is not defined $ python ex_workers.py Unordered results: Process Process-1: Traceback (most recent call last): File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 313, in _bootstrap self.run() File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 108, in run self._target(*self._args, **self._kwargs) File "ex_workers.py", line 23, in worker result = calculate(func, args) NameError: name 'calculate' is not defined uqfoundation-multiprocess-b3457a5/pypy3.10/examples/__init__.py000066400000000000000000000000001455552142400245400ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/examples/benchmarks.py000066400000000000000000000131321455552142400251300ustar00rootroot00000000000000# # Simple benchmarks for the processing package # import time, sys, multiprocess as processing, threading, queue as Queue, gc processing.freezeSupport = processing.freeze_support if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in range(iterations): q.put(a) # q.putMany((a for i in range(iterations)) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print(iterations, 'objects passed through the queue in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 
256 cond.acquire() cond.notify() cond.release() for i in range(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = processing.Pipe() cond = processing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = processing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print(iterations, 'objects passed through connection in',elapsed,'seconds') print('average number/sec:', iterations/elapsed) #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): a = seq[5] elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): l.acquire() l.release() elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in range(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in range(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print(iterations * 2, 'waits in', elapsed, 'seconds') print('average number/sec:', iterations * 2 / elapsed) #### def test(): manager = processing.Manager() gc.disable() print('\n\t######## testing Queue.Queue\n') test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print('\n\t######## testing processing.Queue\n') test_queuespeed(processing.Process, processing.Queue(), processing.Condition()) print('\n\t######## testing Queue managed by server process\n') test_queuespeed(processing.Process, manager.Queue(), manager.Condition()) print('\n\t######## testing processing.Pipe\n') test_pipespeed() print print('\n\t######## testing list\n') test_seqspeed(range(10)) print('\n\t######## testing list managed by server process\n') test_seqspeed(manager.list(range(10))) print('\n\t######## testing Array("i", ..., lock=False)\n') test_seqspeed(processing.Array('i', range(10), lock=False)) print('\n\t######## testing Array("i", ..., lock=True)\n') test_seqspeed(processing.Array('i', range(10), lock=True)) print() print('\n\t######## testing threading.Lock\n') test_lockspeed(threading.Lock()) print('\n\t######## testing threading.RLock\n') test_lockspeed(threading.RLock()) print('\n\t######## testing processing.Lock\n') test_lockspeed(processing.Lock()) print('\n\t######## testing processing.RLock\n') test_lockspeed(processing.RLock()) print('\n\t######## testing lock managed by server process\n') test_lockspeed(manager.Lock()) print('\n\t######## testing rlock managed by server process\n') test_lockspeed(manager.RLock()) print() print('\n\t######## testing threading.Condition\n') test_conditionspeed(threading.Thread, threading.Condition()) print('\n\t######## testing processing.Condition\n') test_conditionspeed(processing.Process, processing.Condition()) print('\n\t######## testing condition managed by a server process\n') test_conditionspeed(processing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': 
processing.freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.10/examples/ex_newtype.py000066400000000000000000000030731455552142400252050ustar00rootroot00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. # from multiprocess import freeze_support as freezeSupport from multiprocess.managers import BaseManager, IteratorProxy as BaseProxy ## class Foo(object): def f(self): print('you called Foo.f()') def g(self): print('you called Foo.g()') def _h(self): print('you called Foo._h()') # A simple generator function def baz(): for i in range(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): def __iter__(self): return self def __next__(self): return self._callmethod('__next__') ## class MyManager(BaseManager): pass # register the Foo class; make all public methods accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make only `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) ## def test(): manager = MyManager() manager.start() print('-' * 20) f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') print('-' * 20) f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') print('-' * 20) it = manager.baz() for i in it: print('<%d>' % i, end=' ') print() ## if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.10/examples/ex_pool.py000066400000000000000000000155061455552142400244670ustar00rootroot00000000000000# # A test of `processing.Pool` class # from multiprocess import Pool, TimeoutError from multiprocess import cpu_count as cpuCount, current_process as currentProcess, freeze_support as freezeSupport, active_children as activeChildren import time, random, sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print('cpuCount() = %d\n' % cpuCount()) # # Create pool # PROCESSES = 4 print('Creating pool with %d processes\n' % PROCESSES) pool = Pool(PROCESSES) # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print('Ordered results using pool.apply_async():') for r in results: print('\t', r.get()) print() print('Ordered results using pool.imap():') for x in imap_it: print('\t', x) print() print('Unordered results using pool.imap_unordered():') for x in imap_unordered_it: print('\t', x) print() print('Ordered results using pool.map() --- will block till complete:') for x in pool.map(calculatestar, TASKS): print('\t', x) print() # # Simple benchmarks # N = 100000 print('def pow3(x): return x**3') t = time.time() A = list(map(pow3, range(N))) print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() B = pool.map(pow3, range(N)) print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() C = list(pool.imap(pow3, 
range(N), chunksize=N//8)) print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() L = [None] * 1000000 print('def noop(x): pass') print('L = [None] * 1000000') t = time.time() A = list(map(noop, L)) print('\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() B = pool.map(noop, L) print('\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() del A, B, C, L # # Test error handling # print('Testing error handling:') try: print(pool.apply(f, (5,))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.apply()') else: raise AssertionError('expected ZeroDivisionError') try: print(pool.map(f, range(10))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.map()') else: raise AssertionError('expected ZeroDivisionError') try: print(list(pool.imap(f, range(10)))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from list(pool.imap())') else: raise AssertionError('expected ZeroDivisionError') it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError('expected ZeroDivisionError') assert i == 9 print('\tGot ZeroDivisionError as expected from IMapIterator.next()') print() # # Testing timeouts # print('Testing ApplyResult.get() with timeout:', end='') res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except TimeoutError: sys.stdout.write('.') print() print() print('Testing IMapIterator.next() with timeout:', end='') it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except TimeoutError: sys.stdout.write('.') print() print() # # Testing callback # print('Testing callback:') A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print('\tcallbacks succeeded\n') else: print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print('Testing close():') for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print('\tclose() succeeded\n') # # Check terminate() method # print('Testing terminate():') pool = Pool(2) ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print('\tterminate() succeeded\n') # # Check garbage collection # print('Testing garbage collection:') pool = Pool(2) processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] del results, pool time.sleep(0.2) for worker in processes: assert not worker.is_alive() print('\tgarbage collection succeeded\n') if __name__ == '__main__': freezeSupport() test() 
uqfoundation-multiprocess-b3457a5/pypy3.10/examples/ex_synchronize.py000066400000000000000000000144041455552142400260650ustar00rootroot00000000000000# # A test file for the `processing` package # import time, sys, random from queue import Empty import multiprocess as processing # may get overwritten processing.currentProcess = processing.current_process processing.freezeSupport = processing.freeze_support processing.activeChildren = processing.active_children #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print('\n\t\t\t' + str(processing.currentProcess()) + ' has finished') running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = processing.Value('i', TASKS) mutex = processing.Lock() for i in range(TASKS): processing.Process(target=value_func, args=(running, mutex)).start() while running.value > 0: time.sleep(0.08) mutex.acquire() print(running.value, end=' ') sys.stdout.flush() mutex.release() print() print('No more running processes') #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = processing.Queue() p = processing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print(o, end=' ') sys.stdout.flush() except Empty: print('TIMEOUT') print() #### TEST_CONDITION def condition_func(cond): cond.acquire() print('\t' + str(cond)) time.sleep(2) print('\tchild is notifying') print('\t' + str(cond)) cond.notify() cond.release() def test_condition(): cond = processing.Condition() p = processing.Process(target=condition_func, args=(cond,)) print(cond) cond.acquire() print(cond) cond.acquire() print(cond) p.start() print('main is waiting') cond.wait() print('main has woken up') print(cond) cond.release() print(cond) cond.release() p.join() print(cond) #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print(running.value, 'tasks are running') mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print('%s has finished' % processing.currentProcess()) mutex.release() sema.release() def test_semaphore(): sema = processing.Semaphore(3) mutex = processing.RLock() running = processing.Value('i', 0) processes = [ processing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print('\tchild sleeping') time.sleep(5.5) print('\n\tchild terminating') def test_join_timeout(): p = processing.Process(target=join_timeout_func) p.start() print('waiting for process to finish') while 1: p.join(timeout=1) if not p.is_alive(): break print('.', end=' ') sys.stdout.flush() #### TEST_EVENT def event_func(event): print('\t%r is waiting' % processing.currentProcess()) event.wait() print('\t%r has woken up' % processing.currentProcess()) def test_event(): event = processing.Event() processes = [processing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print('main is sleeping') time.sleep(2) print('main is setting event') event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert 
list(a) == sa print('Tests passed') def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [processing.Value(id, v) for id, v in values] shared_arrays = [processing.Array(id, a) for id, a in arrays] p = processing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print('\n\t######## %s\n' % func.__name__) func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print(info) raise ValueError('there should be no positive refcounts left') if __name__ == '__main__': processing.freezeSupport() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print(' Using processes '.center(79, '-')) namespace = processing elif sys.argv[1] == 'manager': print(' Using processes and a manager '.center(79, '-')) namespace = processing.Manager() namespace.Process = processing.Process namespace.currentProcess = processing.currentProcess namespace.activeChildren = processing.activeChildren elif sys.argv[1] == 'threads': print(' Using threads '.center(79, '-')) import processing.dummy as namespace else: print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]) raise SystemExit(2) test(namespace) uqfoundation-multiprocess-b3457a5/pypy3.10/examples/ex_webserver.py000066400000000000000000000041001455552142400255060ustar00rootroot00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `processing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. 
# import os import sys from multiprocess import Process, current_process as currentProcess, freeze_support as freezeSupport from http.server import HTTPServer from http.server import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocess.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (currentProcess()._name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print('Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)) print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']) os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.10/examples/ex_workers.py000066400000000000000000000042241455552142400252050ustar00rootroot00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). 
# import time import random from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport from multiprocess import Queue # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks list(map(task_queue.put, TASKS1)) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print('Unordered results:') for i in range(len(TASKS1)): print('\t', done_queue.get()) # Add more tasks using `put()` instead of `putMany()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print('\t', done_queue.get()) # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.10/index.html000066400000000000000000000117511455552142400226250ustar00rootroot00000000000000 Python processing

Python processing

Author: R Oudkerk
Contact: roudkerk at users.berlios.de
Url: http://developer.berlios.de/projects/pyprocessing
Version: 0.52
Licence: BSD Licence

processing is a package for the Python language which supports the spawning of processes using the API of the standard library's threading module. It runs on both Unix and Windows.

Features:

  • Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
  • Objects can be shared between processes using a server process or (for simple data) shared memory.
  • Equivalents of all the synchronization primitives in threading are available.
  • A Pool class makes it easy to submit tasks to a pool of worker processes.

Examples

The processing.Process class follows the API of threading.Thread. For example

from processing import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=[q])
    p.start()
    print q.get()
    p.join()
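
The same example runs unchanged on Python 3 with the multiprocess fork of this package; only the import and the print call differ (a minimal sketch, assuming multiprocess is installed):

from multiprocess import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()                          # queue shared with the child process
    p = Process(target=f, args=[q])      # same constructor signature as threading.Thread
    p.start()
    print(q.get())                       # prints 'hello world'
    p.join()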

Synchronization primitives like locks, semaphores and conditions are available, for example

>>> from processing import Condition
>>> c = Condition()
>>> print c
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print c
<Condition(<RLock(MainProcess, 1)>), 0>

One can also use a manager to create shared objects either in shared memory or in a server process, for example

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print l
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print repr(l)
<Proxy[list] object at 0x00E1B3B0>
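
With multiprocess on Python 3 the manager-based example is essentially identical; only the print syntax changes (a minimal sketch, assuming an interactive session):

>>> from multiprocess import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print(l)
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]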

Tasks can be offloaded to a pool of worker processes in various ways, for example

>>> from processing import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.mapAsync(f, range(10))
>>> print result.get(timeout=1)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
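
The camelCase mapAsync method of the original processing package is spelled map_async in multiprocessing and multiprocess; a minimal sketch of the equivalent call on Python 3 (assuming multiprocess is installed):

>>> from multiprocess import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.map_async(f, range(10))
>>> print(result.get(timeout=1))
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]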
uqfoundation-multiprocess-b3457a5/pypy3.10/module/000077500000000000000000000000001455552142400221105ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/000077500000000000000000000000001455552142400250005ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/__init__.py000066400000000000000000000000001455552142400270770ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/interp_memory.py000066400000000000000000000010051455552142400302370ustar00rootroot00000000000000from rpython.rtyper.lltypesystem import rffi from pypy.interpreter.error import oefmt from pypy.module.mmap.interp_mmap import W_MMap def address_of_buffer(space, w_obj): if space.config.objspace.usemodules.mmap: mmap = space.interp_w(W_MMap, w_obj) address = rffi.cast(rffi.SIZE_T, mmap.mmap.data) return space.newtuple2(space.newint(address), space.newint(mmap.mmap.size)) else: raise oefmt(space.w_TypeError, "cannot get address of buffer") uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/interp_semaphore.py000066400000000000000000000522731455552142400307270ustar00rootroot00000000000000import errno import os import sys import time from rpython.rlib import jit, rgc, rthread from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform as platform from rpython.translator.tool.cbuild import ExternalCompilationInfo from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import GetSetProperty, TypeDef RECURSIVE_MUTEX, SEMAPHORE = range(2) sys_platform = sys.platform if sys.platform == 'win32': from rpython.rlib import rwin32 from pypy.module._multiprocessing.interp_win32_py3 import ( _GetTickCount, handle_w) SEM_VALUE_MAX = int(2**31-1) # max rffi.LONG _CreateSemaphore = rwin32.winexternal( 'CreateSemaphoreA', [rffi.VOIDP, rffi.LONG, rffi.LONG, rwin32.LPCSTR], rwin32.HANDLE, save_err=rffi.RFFI_FULL_LASTERROR) _CloseHandle_no_errno = rwin32.winexternal('CloseHandle', [rwin32.HANDLE], rwin32.BOOL, releasegil=False) _ReleaseSemaphore = rwin32.winexternal( 'ReleaseSemaphore', [rwin32.HANDLE, rffi.LONG, rffi.LONGP], rwin32.BOOL, save_err=rffi.RFFI_SAVE_LASTERROR, releasegil=False) def sem_unlink(name): return None else: from rpython.rlib import rposix if sys.platform == 'darwin': libraries = [] else: libraries = ['rt'] eci = ExternalCompilationInfo( includes = ['sys/time.h', 'limits.h', 'semaphore.h', ], libraries = libraries, ) class CConfig: _compilation_info_ = eci TIMEVAL = platform.Struct('struct timeval', [('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)]) TIMESPEC = platform.Struct('struct timespec', [('tv_sec', rffi.TIME_T), ('tv_nsec', rffi.LONG)]) SEM_FAILED = platform.ConstantInteger('SEM_FAILED') SEM_VALUE_MAX = platform.DefinedConstantInteger('SEM_VALUE_MAX') SEM_TIMED_WAIT = platform.Has('sem_timedwait') SEM_T_SIZE = platform.SizeOf('sem_t') config = platform.configure(CConfig) TIMEVAL = config['TIMEVAL'] TIMESPEC = config['TIMESPEC'] TIMEVALP = rffi.CArrayPtr(TIMEVAL) TIMESPECP = rffi.CArrayPtr(TIMESPEC) SEM_T = rffi.COpaquePtr('sem_t', compilation_info=eci) # rffi.cast(SEM_T, config['SEM_FAILED']) SEM_FAILED = config['SEM_FAILED'] SEM_VALUE_MAX = config['SEM_VALUE_MAX'] if SEM_VALUE_MAX is None: # on Hurd SEM_VALUE_MAX = sys.maxint SEM_TIMED_WAIT = 
config['SEM_TIMED_WAIT'] SEM_T_SIZE = config['SEM_T_SIZE'] if sys.platform == 'darwin': HAVE_BROKEN_SEM_GETVALUE = True else: HAVE_BROKEN_SEM_GETVALUE = False def external(name, args, result, **kwargs): return rffi.llexternal(name, args, result, compilation_info=eci, **kwargs) _sem_open = external('sem_open', [rffi.CCHARP, rffi.INT, rffi.INT, rffi.UINT], SEM_T, save_err=rffi.RFFI_SAVE_ERRNO) # sem_close is releasegil=False to be able to use it in the __del__ _sem_close_no_errno = external('sem_close', [SEM_T], rffi.INT, releasegil=False) _sem_close = external('sem_close', [SEM_T], rffi.INT, releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _sem_wait = external('sem_wait', [SEM_T], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _sem_trywait = external('sem_trywait', [SEM_T], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _sem_post = external('sem_post', [SEM_T], rffi.INT, releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) _sem_getvalue = external('sem_getvalue', [SEM_T, rffi.INTP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _gettimeofday = external('gettimeofday', [TIMEVALP, rffi.VOIDP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _select = external('select', [rffi.INT, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP, TIMEVALP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) @jit.dont_look_inside def sem_open(name, oflag, mode, value): res = _sem_open(name, oflag, mode, value) if res == rffi.cast(SEM_T, SEM_FAILED): raise OSError(rposix.get_saved_errno(), "sem_open failed") return res def sem_close(handle): res = _sem_close(handle) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_close failed") def sem_unlink(name): res = _sem_unlink(name) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_unlink failed") def sem_wait(sem): res = _sem_wait(sem) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_wait failed") def sem_trywait(sem): res = _sem_trywait(sem) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_trywait failed") def sem_timedwait(sem, deadline): res = _sem_timedwait(sem, deadline) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_timedwait failed") def _sem_timedwait_save(sem, deadline): delay = 0 void = lltype.nullptr(rffi.VOIDP.TO) with lltype.scoped_alloc(TIMEVALP.TO, 1) as tvdeadline: while True: # poll if _sem_trywait(sem) == 0: return 0 elif rposix.get_saved_errno() != errno.EAGAIN: return -1 now = gettimeofday() c_tv_sec = rffi.getintfield(deadline[0], 'c_tv_sec') c_tv_nsec = rffi.getintfield(deadline[0], 'c_tv_nsec') if (c_tv_sec < now[0] or (c_tv_sec == now[0] and c_tv_nsec <= now[1])): rposix.set_saved_errno(errno.ETIMEDOUT) return -1 # calculate how much time is left difference = ((c_tv_sec - now[0]) * 1000000 + (c_tv_nsec - now[1])) # check delay not too long -- maximum is 20 msecs if delay > 20000: delay = 20000 if delay > difference: delay = difference delay += 1000 # sleep rffi.setintfield(tvdeadline[0], 'c_tv_sec', delay / 1000000) rffi.setintfield(tvdeadline[0], 'c_tv_usec', delay % 1000000) if _select(0, void, void, void, tvdeadline) < 0: return -1 if SEM_TIMED_WAIT: _sem_timedwait = external('sem_timedwait', [SEM_T, TIMESPECP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) else: _sem_timedwait = _sem_timedwait_save def sem_post(sem): res = _sem_post(sem) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_post failed") def sem_getvalue(sem): sval_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: res = _sem_getvalue(sem, sval_ptr) if res < 0: raise 
OSError(rposix.get_saved_errno(), "sem_getvalue failed") return rffi.cast(lltype.Signed, sval_ptr[0]) finally: lltype.free(sval_ptr, flavor='raw') def gettimeofday(): now = lltype.malloc(TIMEVALP.TO, 1, flavor='raw') try: res = _gettimeofday(now, None) if res < 0: raise OSError(rposix.get_saved_errno(), "gettimeofday failed") return (rffi.getintfield(now[0], 'c_tv_sec'), rffi.getintfield(now[0], 'c_tv_usec')) finally: lltype.free(now, flavor='raw') def handle_w(space, w_handle): return rffi.cast(SEM_T, space.int_w(w_handle)) # utilized by POSIX and win32 def semaphore_unlink(space, w_name): name = space.text_w(w_name) try: sem_unlink(name) except OSError as e: raise wrap_oserror(space, e) class CounterState: def __init__(self, space): self.counter = 0 def _cleanup_(self): self.counter = 0 def getCount(self): value = self.counter self.counter += 1 return value # These functions may raise bare OSError or WindowsError, # don't forget to wrap them into OperationError if sys.platform == 'win32': def create_semaphore(space, name, val, max): rwin32.SetLastError_saved(0) handle = _CreateSemaphore(rffi.NULL, val, max, rffi.NULL) # On Windows we should fail on ERROR_ALREADY_EXISTS err = rwin32.GetLastError_saved() if err != 0: raise WindowsError(err, "CreateSemaphore") return handle def delete_semaphore(handle): _CloseHandle_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: full_msecs = 0 elif space.is_none(w_timeout): full_msecs = rwin32.INFINITE else: timeout = space.float_w(w_timeout) timeout *= 1000.0 if timeout < 0.0: timeout = 0.0 elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise oefmt(space.w_OverflowError, "timeout is too large") full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) if res != rwin32.WAIT_TIMEOUT: self.last_tid = rthread.get_ident() self.count += 1 return True msecs = full_msecs start = _GetTickCount() while True: from pypy.module.time.interp_time import State interrupt_event = space.fromcache(State).get_interrupt_event() handles = [self.handle, interrupt_event] # do the wait rwin32.ResetEvent(interrupt_event) res = rwin32.WaitForMultipleObjects(handles, timeout=msecs) if res != rwin32.WAIT_OBJECT_0 + 1: break # got SIGINT so give signal handler a chance to run time.sleep(0.001) # if this is main thread let KeyboardInterrupt be raised _check_signals(space) # recalculate timeout if msecs != rwin32.INFINITE: ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: self.last_tid = rthread.get_ident() self.count += 1 return True return False def semlock_release(self, space): if not _ReleaseSemaphore(self.handle, 1, lltype.nullptr(rffi.LONGP.TO)): err = rwin32.GetLastError_saved() if err == 0x0000012a: # ERROR_TOO_MANY_POSTS raise oefmt(space.w_ValueError, "semaphore or lock released too many times") else: raise WindowsError(err, "ReleaseSemaphore") def semlock_getvalue(self, space): if rwin32.WaitForSingleObject(self.handle, 0) == rwin32.WAIT_TIMEOUT: return 0 previous_ptr = lltype.malloc(rffi.LONGP.TO, 1, flavor='raw') try: if not _ReleaseSemaphore(self.handle, 1, previous_ptr): raise rwin32.lastSavedWindowsError("ReleaseSemaphore") return intmask(previous_ptr[0]) + 1 finally: lltype.free(previous_ptr, flavor='raw') def semlock_iszero(self, space): return semlock_getvalue(self, space) == 0 else: def create_semaphore(space, name, val, max): sem = 
sem_open(name, os.O_CREAT | os.O_EXCL, 0600, val) rgc.add_memory_pressure(SEM_T_SIZE) return sem def reopen_semaphore(name): sem = sem_open(name, 0, 0600, 0) rgc.add_memory_pressure(SEM_T_SIZE) return sem def delete_semaphore(handle): _sem_close_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: deadline = lltype.nullptr(TIMESPECP.TO) elif space.is_none(w_timeout): deadline = lltype.nullptr(TIMESPECP.TO) else: timeout = space.float_w(w_timeout) sec = int(timeout) nsec = int(1e9 * (timeout - sec) + 0.5) now_sec, now_usec = gettimeofday() deadline = lltype.malloc(TIMESPECP.TO, 1, flavor='raw') rffi.setintfield(deadline[0], 'c_tv_sec', now_sec + sec) rffi.setintfield(deadline[0], 'c_tv_nsec', now_usec * 1000 + nsec) val = (rffi.getintfield(deadline[0], 'c_tv_sec') + rffi.getintfield(deadline[0], 'c_tv_nsec') / 1000000000) rffi.setintfield(deadline[0], 'c_tv_sec', val) val = rffi.getintfield(deadline[0], 'c_tv_nsec') % 1000000000 rffi.setintfield(deadline[0], 'c_tv_nsec', val) try: while True: try: if not block: sem_trywait(self.handle) elif not deadline: sem_wait(self.handle) else: sem_timedwait(self.handle, deadline) except OSError as e: if e.errno == errno.EINTR: # again _check_signals(space) continue elif e.errno in (errno.EAGAIN, errno.ETIMEDOUT): return False raise _check_signals(space) self.last_tid = rthread.get_ident() self.count += 1 return True finally: if deadline: lltype.free(deadline, flavor='raw') def semlock_release(self, space): if self.kind == RECURSIVE_MUTEX: sem_post(self.handle) return if HAVE_BROKEN_SEM_GETVALUE: # We will only check properly the maxvalue == 1 case if self.maxvalue == 1: # make sure that already locked try: sem_trywait(self.handle) except OSError as e: if e.errno != errno.EAGAIN: raise # it is already locked as expected else: # it was not locked so undo wait and raise sem_post(self.handle) raise oefmt(space.w_ValueError, "semaphore or lock released too many times") else: # This check is not an absolute guarantee that the semaphore does # not rise above maxvalue. 
if sem_getvalue(self.handle) >= self.maxvalue: raise oefmt(space.w_ValueError, "semaphore or lock released too many times") sem_post(self.handle) def semlock_getvalue(self, space): if HAVE_BROKEN_SEM_GETVALUE: raise oefmt(space.w_NotImplementedError, "sem_getvalue is not implemented on this system") else: val = sem_getvalue(self.handle) # some posix implementations use negative numbers to indicate # the number of waiting threads if val < 0: val = 0 return val def semlock_iszero(self, space): if HAVE_BROKEN_SEM_GETVALUE: try: sem_trywait(self.handle) except OSError as e: if e.errno != errno.EAGAIN: raise return True else: sem_post(self.handle) return False else: return semlock_getvalue(self, space) == 0 class W_SemLock(W_Root): def __init__(self, space, handle, kind, maxvalue, name): self.handle = handle self.kind = kind self.count = 0 self.maxvalue = maxvalue self.register_finalizer(space) self.last_tid = -1 self.name = name def name_get(self, space): if self.name is None: return space.w_None return space.newtext(self.name) def kind_get(self, space): return space.newint(self.kind) def maxvalue_get(self, space): return space.newint(self.maxvalue) def handle_get(self, space): h = rffi.cast(rffi.INTPTR_T, self.handle) return space.newint(h) def get_count(self, space): return space.newint(self.count) def _ismine(self): return self.count > 0 and rthread.get_ident() == self.last_tid def is_mine(self, space): return space.newbool(self._ismine()) def is_zero(self, space): try: res = semlock_iszero(self, space) except OSError as e: raise wrap_oserror(space, e) return space.newbool(res) def get_value(self, space): try: val = semlock_getvalue(self, space) except OSError as e: raise wrap_oserror(space, e) return space.newint(val) @unwrap_spec(block=bool) def acquire(self, space, block=True, w_timeout=None): # check whether we already own the lock if self.kind == RECURSIVE_MUTEX and self._ismine(): self.count += 1 return space.w_True try: # sets self.last_tid and increments self.count # those steps need to be as close as possible to # acquiring the semlock for self._ismine() to support # multiple threads got = semlock_acquire(self, space, block, w_timeout) except OSError as e: raise wrap_oserror(space, e) if got: return space.w_True else: return space.w_False def release(self, space): if self.kind == RECURSIVE_MUTEX: if not self._ismine(): raise oefmt(space.w_AssertionError, "attempt to release recursive lock not owned by " "thread") if self.count > 1: self.count -= 1 return try: # Note: a succesful semlock_release() must not release the GIL, # otherwise there is a race condition on self.count semlock_release(self, space) self.count -= 1 except OSError as e: raise wrap_oserror(space, e) def after_fork(self): self.count = 0 @unwrap_spec(kind=int, maxvalue=int, name='text_or_none') def rebuild(space, w_cls, w_handle, kind, maxvalue, name): # if sys_platform != 'win32' and name is not None: # like CPython, in this case ignore 'w_handle' try: handle = reopen_semaphore(name) except OSError as e: raise wrap_oserror(space, e) else: handle = handle_w(space, w_handle) # self = space.allocate_instance(W_SemLock, w_cls) self.__init__(space, handle, kind, maxvalue, name) return self def enter(self, space): return self.acquire(space, w_timeout=space.w_None) def exit(self, space, __args__): self.release(space) def _finalize_(self): delete_semaphore(self.handle) @unwrap_spec(kind=int, value=int, maxvalue=int, name='text', unlink=int) def descr_new(space, w_subtype, kind, value, maxvalue, name, unlink): if kind != 
RECURSIVE_MUTEX and kind != SEMAPHORE: raise oefmt(space.w_ValueError, "unrecognized kind") counter = space.fromcache(CounterState).getCount() try: handle = create_semaphore(space, name, value, maxvalue) if unlink: sem_unlink(name) name = None except OSError as e: raise wrap_oserror(space, e) self = space.allocate_instance(W_SemLock, w_subtype) self.__init__(space, handle, kind, maxvalue, name) return self W_SemLock.typedef = TypeDef( "SemLock", __new__ = interp2app(descr_new), kind = GetSetProperty(W_SemLock.kind_get), maxvalue = GetSetProperty(W_SemLock.maxvalue_get), handle = GetSetProperty(W_SemLock.handle_get), name = GetSetProperty(W_SemLock.name_get), _count = interp2app(W_SemLock.get_count), _is_mine = interp2app(W_SemLock.is_mine), _is_zero = interp2app(W_SemLock.is_zero), _get_value = interp2app(W_SemLock.get_value), acquire = interp2app(W_SemLock.acquire), release = interp2app(W_SemLock.release), _rebuild = interp2app(W_SemLock.rebuild.im_func, as_classmethod=True), _after_fork = interp2app(W_SemLock.after_fork), __enter__=interp2app(W_SemLock.enter), __exit__=interp2app(W_SemLock.exit), SEM_VALUE_MAX=SEM_VALUE_MAX, ) def _check_signals(space): space.getexecutioncontext().checksignals() uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/interp_win32_py3.py000066400000000000000000000032231455552142400304700ustar00rootroot00000000000000from rpython.rtyper.lltypesystem import rffi from rpython.rlib._rsocket_rffi import socketclose, geterrno, socketrecv, send from rpython.rlib import rwin32 from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec def getWindowsError(space): errno = geterrno() message = rwin32.FormatErrorW(errno) w_errcode = space.newint(errno) return OperationError(space.w_WindowsError, space.newtuple([w_errcode, space.newtext(*message), space.w_None, w_errcode])) @unwrap_spec(handle=int) def multiprocessing_closesocket(space, handle): res = socketclose(handle) if res != 0: raise getWindowsError(space) @unwrap_spec(handle=int, buffersize=int) def multiprocessing_recv(space, handle, buffersize): with rffi.scoped_alloc_buffer(buffersize) as buf: read_bytes = socketrecv(handle, buf.raw, buffersize, 0) if read_bytes >= 0: return space.newbytes(buf.str(read_bytes)) raise getWindowsError(space) @unwrap_spec(handle=int, data='bufferstr') def multiprocessing_send(space, handle, data): if data is None: raise OperationError(space.w_ValueError, 'data cannot be None') with rffi.scoped_nonmovingbuffer(data) as dataptr: # rsocket checks for writability of socket with wait_for_data, cpython does check res = send(handle, dataptr, len(data), 0) if res < 0: raise getWindowsError(space) return space.newint(res) def handle_w(space, w_handle): return rffi.cast(rwin32.HANDLE, space.int_w(w_handle)) _GetTickCount = rwin32.winexternal( 'GetTickCount', [], rwin32.DWORD) uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/moduledef.py000066400000000000000000000010531455552142400273150ustar00rootroot00000000000000import sys from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): interpleveldefs = { 'SemLock' : 'interp_semaphore.W_SemLock', } appleveldefs = { } if sys.platform == 'win32': interpleveldefs['closesocket'] = 'interp_win32_py3.multiprocessing_closesocket' interpleveldefs['recv'] = 'interp_win32_py3.multiprocessing_recv' interpleveldefs['send'] = 'interp_win32_py3.multiprocessing_send' interpleveldefs['sem_unlink'] = 'interp_semaphore.semaphore_unlink' 
uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/test/000077500000000000000000000000001455552142400257575ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/test/__init__.py000066400000000000000000000000001455552142400300560ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/test/test_interp_semaphore.py000066400000000000000000000042471455552142400327430ustar00rootroot00000000000000import pytest import time import sys from rpython.rlib.rgil import yield_thread from pypy.interpreter.gateway import interp2app from pypy.module.thread.os_lock import _set_sentinel from pypy.module.thread.os_thread import start_new_thread from pypy.module._multiprocessing.interp_semaphore import ( create_semaphore, delete_semaphore, W_SemLock, sem_unlink) @pytest.mark.skipif(sys.platform == 'win32', reason='hangs on win32') @pytest.mark.parametrize('spaceconfig', [ {'usemodules': ['_multiprocessing', 'thread']}]) @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semlock_release(space): # trigger the setup() code in time.moduledef space.getbuiltinmodule('time') sem_name = '/test8' _handle = create_semaphore(space, sem_name, 1, 1) try: sem_unlink(sem_name) w_lock = W_SemLock(space, _handle, 0, 1, None) created = [] successful = [] N_THREADS = 16 def run(space): w_sentinel = _set_sentinel(space) yield_thread() w_sentinel.descr_lock_acquire(space) # releases GIL try: yield_thread() created.append(w_sentinel) w_got = w_lock.acquire(space, w_timeout=space.newfloat(5.)) # releases GIL if space.is_true(w_got): yield_thread() w_lock.release(space) successful.append(w_sentinel) except: import traceback traceback.print_exc() raise w_run = space.wrap(interp2app(run)) w_lock.acquire(space) for _ in range(N_THREADS): start_new_thread(space, w_run, space.newtuple([])) # releases GIL deadline = time.time() + 5. 
while len(created) < N_THREADS: assert time.time() < deadline yield_thread() w_lock.release(space) for w_sentinel in created: # Join thread w_sentinel.descr_lock_acquire(space) # releases GIL w_sentinel.descr_lock_release(space) assert len(successful) == N_THREADS finally: delete_semaphore(_handle) uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/test/test_semaphore.py000066400000000000000000000147421455552142400313630ustar00rootroot00000000000000import py import sys import pytest from pypy.module._multiprocessing.interp_semaphore import ( RECURSIVE_MUTEX, SEMAPHORE) class AppTestSemaphore: spaceconfig = dict(usemodules=('_multiprocessing', 'thread', 'signal', 'select', 'binascii', 'struct', '_posixsubprocess')) if sys.platform == 'win32': spaceconfig['usemodules'] += ('_rawffi', '_cffi_backend') else: spaceconfig['usemodules'] += ('fcntl',) def setup_class(cls): cls.w_SEMAPHORE = cls.space.wrap(SEMAPHORE) cls.w_RECURSIVE = cls.space.wrap(RECURSIVE_MUTEX) cls.w_runappdirect = cls.space.wrap(cls.runappdirect) @py.test.mark.skipif("sys.platform == 'win32'") def test_sem_unlink(self): from _multiprocessing import sem_unlink import errno try: sem_unlink("non-existent") except OSError as e: assert e.errno in (errno.ENOENT, errno.EINVAL) else: assert 0, "should have raised" def test_semaphore_basic(self): from _multiprocessing import SemLock import sys assert SemLock.SEM_VALUE_MAX > 10 kind = self.SEMAPHORE value = 1 maxvalue = 1 # the following line gets OSError: [Errno 38] Function not implemented # if /dev/shm is not mounted on Linux sem = SemLock(kind, value, maxvalue, "1", unlink=True) assert sem.kind == kind assert sem.maxvalue == maxvalue assert isinstance(sem.handle, int) assert sem.name is None assert sem._count() == 0 if sys.platform == 'darwin': raises(NotImplementedError, 'sem._get_value()') else: assert sem._get_value() == 1 assert sem._is_zero() == False sem.acquire() assert sem._is_mine() assert sem._count() == 1 if sys.platform == 'darwin': raises(NotImplementedError, 'sem._get_value()') else: assert sem._get_value() == 0 assert sem._is_zero() == True sem.release() assert sem._count() == 0 sem.acquire() sem._after_fork() assert sem._count() == 0 @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_recursive(self): from _multiprocessing import SemLock kind = self.RECURSIVE value = 1 maxvalue = 1 # the following line gets OSError: [Errno 38] Function not implemented # if /dev/shm is not mounted on Linux sem = SemLock(kind, value, maxvalue, "2", unlink=True) sem.acquire() sem.release() assert sem._count() == 0 sem.acquire() sem.release() # now recursively sem.acquire() sem.acquire() assert sem._count() == 2 sem.release() sem.release() @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_maxvalue(self): from _multiprocessing import SemLock import sys kind = self.SEMAPHORE value = SemLock.SEM_VALUE_MAX maxvalue = SemLock.SEM_VALUE_MAX sem = SemLock(kind, value, maxvalue, "3.0", unlink=True) for i in range(10): res = sem.acquire() assert res == True assert sem._count() == i+1 if sys.platform != 'darwin': assert sem._get_value() == maxvalue - (i+1) value = 0 maxvalue = SemLock.SEM_VALUE_MAX sem = SemLock(kind, value, maxvalue, "3.1", unlink=True) for i in range(10): sem.release() assert sem._count() == -(i+1) if sys.platform != 'darwin': assert sem._get_value() == i+1 @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_wait(self): from _multiprocessing import SemLock 
kind = self.SEMAPHORE value = 1 maxvalue = 1 sem = SemLock(kind, value, maxvalue, "3", unlink=True) res = sem.acquire() assert res == True res = sem.acquire(timeout=0.1) assert res == False @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_rebuild(self): import sys if sys.platform == 'win32': from _multiprocessing import SemLock def sem_unlink(*args): pass else: from _multiprocessing import SemLock, sem_unlink kind = self.SEMAPHORE value = 1 maxvalue = 1 sem = SemLock(kind, value, maxvalue, "4.2", unlink=False) try: sem2 = SemLock._rebuild(-1, kind, value, "4.2") #assert sem.handle != sem2.handle---even though they come # from different calls to sem_open(), on Linux at least, # they are the same pointer sem2 = SemLock._rebuild(sem.handle, kind, value, None) assert sem.handle == sem2.handle finally: sem_unlink("4.2") @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_contextmanager(self): from _multiprocessing import SemLock kind = self.SEMAPHORE value = 1 maxvalue = 1 sem = SemLock(kind, value, maxvalue, "5", unlink=True) with sem: assert sem._count() == 1 assert sem._count() == 0 def test_unlink(self): from _multiprocessing import SemLock sem = SemLock(self.SEMAPHORE, 1, 1, '/mp-123', unlink=True) assert sem._count() == 0 @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_in_threads(self): from _multiprocessing import SemLock from threading import Thread from time import sleep l = SemLock(0, 1, 1, "6", unlink=True) if self.runappdirect: def f(id): for i in range(10000): pass else: def f(id): for i in range(1000): # reduce the probability of thread switching # at exactly the wrong time in semlock_acquire for j in range(10): pass threads = [Thread(None, f, args=(i,)) for i in range(2)] [t.start() for t in threads] # if the RLock calls to sem_wait and sem_post do not match, # one of the threads will block and the call to join will fail [t.join() for t in threads] uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/test/test_win32.py000066400000000000000000000016211455552142400303320ustar00rootroot00000000000000import py import sys @py.test.mark.skipif('sys.platform != "win32"') class AppTestWin32: spaceconfig = dict(usemodules=('_multiprocessing', '_cffi_backend', 'signal', '_rawffi', 'binascii', '_socket', 'select')) def setup_class(cls): # import here since importing _multiprocessing imports multiprocessing # (in interp_connection) to get the BufferTooShort exception, which on # win32 imports msvcrt which imports via cffi which allocates ccharp # that are never released. 
This trips up the LeakChecker if done in a # test function cls.w_multiprocessing = cls.space.appexec([], '(): import multiprocessing as m; return m') def test_closesocket(self): from _multiprocessing import closesocket raises(WindowsError, closesocket, -1) uqfoundation-multiprocess-b3457a5/pypy3.10/module/_multiprocess/test/test_ztranslation.py000066400000000000000000000001651455552142400321220ustar00rootroot00000000000000from pypy.objspace.fake.checkmodule import checkmodule def test_checkmodule(): checkmodule('_multiprocessing') uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/000077500000000000000000000000001455552142400233545ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/__init__.py000066400000000000000000000035001455552142400254630ustar00rootroot00000000000000# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Original: Copyright (c) 2006-2008, R Oudkerk # Original: Licensed to PSF under a Contributor Agreement. # Forked by Mike McKerns, to support enhanced serialization. # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) sys.path.append(root) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(root, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) del os, sys, root, get_license_text, get_readme_as_rst import sys from . import context # # Copy stuff from default context # __all__ = [x for x in dir(context._default_context) if not x.startswith('_')] globals().update((name, getattr(context._default_context, name)) for name in __all__) # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def license(): """print license""" print (__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/connection.py000066400000000000000000000761431455552142400261000ustar00rootroot00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] import io import os import sys import socket import struct import time import tempfile import itertools try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import util from . import AuthenticationError, BufferTooShort from .context import reduction _ForkingPickler = reduction.ForkingPickler try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE except ImportError: if sys.platform == 'win32': raise _winapi = None # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... 
CONNECTION_TIMEOUT = 20. _mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return getattr(time,'monotonic',time.time)() + timeout def _check_timeout(t): return getattr(time,'monotonic',time.time)() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter)), dir="") else: raise ValueError('unrecognized family') def _validate_family(family): ''' Checks if the family is valid for the current environment. ''' if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) if sys.platform == 'win32' and family == 'AF_UNIX': # double check if not hasattr(socket, family): raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str or util.is_abstract_socket_namespace(address): return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _ConnectionBase: _handle = None def __init__(self, handle, readable=True, writable=True): handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) # HACK for byte-indexing of non-bytewise buffers (e.g. 
array.array) if m.itemsize > 1: m = memoryview(bytes(m)) n = len(m) if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(_ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. """ self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[offset // itemsize : (offset + size) // itemsize]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return _ForkingPickler.loads(buf.getbuffer()) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. 
""" _got_empty_message = False def _close(self, _CloseHandle=_winapi.CloseHandle): _CloseHandle(self._handle) def _send_bytes(self, buf): ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nwritten, err = ov.GetOverlappedResult(True) assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile(self._handle, bsize, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). """ if _winapi: def _close(self, _close=_multiprocessing.closesocket): _close(self._handle) _write = _multiprocessing.send _read = _multiprocessing.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: n = write(self._handle, buf) remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: chunk = read(handle, remaining) n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf): n = len(buf) if n > 0x7fffffff: pre_header = struct.pack("!i", -1) header = struct.pack("!Q", n) self._send(pre_header) self._send(header) self._send(buf) else: # For wire compatibility with 3.7 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. 
self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if size == -1: buf = self._recv(8) size, = struct.unpack("!Q", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. ''' listener = self._listener if listener is not None: self._listener = None listener.close() @property def address(self): return self._listener._address @property def last_accepted(self): return self._listener._last_accepted def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = Connection(s1.detach()) c2 = Connection(s2.detach()) else: fd1, fd2 = os.pipe() c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = 
overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): # Linux abstract socket namespaces do not need to be explicitly unlinked self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() s.setblocking(True) return Connection(s.detach()) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) with socket.socket( getattr(socket, family) ) as s: s.setblocking(True) s.connect(address) return Connection(s.detach()) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: res = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = b'#CHALLENGE#' WELCOME = b'#WELCOME#' FAILURE = b'#FAILURE#' def deliver_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message, 'md5').digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message, 'md5').digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signalled. (Only # returning the first signalled might create starvation issues.) 
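# --- Editorial illustration (not part of the archived source) -----------
# Sketch of the public wait() helper defined in the platform-specific
# branches that follow: it multiplexes over several connections and
# returns only the ones that are currently ready to read.
def _example_wait_on_pipes():
    from multiprocess.connection import Pipe, wait

    readers, writers = [], []
    for _ in range(3):
        r, w = Pipe(duplex=False)
        readers.append(r)
        writers.append(w)

    writers[1].send('only this pipe has data')
    ready = wait(readers, timeout=1.0)        # -> [readers[1]]
    for conn in ready:
        print(conn.recv())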
L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res+1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: ov, err = None, e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed... if ov and sys.getwindowsversion()[:2] >= (6, 2): # ... except on Windows 8 and later, where # the message HAS been consumed. try: _, err = ov.GetOverlappedResult(False) except OSError as e: err = e.winerror if not err and hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [o for o in object_list if o in ready_objects] else: import selectors # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). if hasattr(selectors, 'PollSelector'): _WaitSelector = selectors.PollSelector else: _WaitSelector = selectors.SelectSelector def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' with _WaitSelector() as selector: for obj in object_list: selector.register(obj, selectors.EVENT_READ) if timeout is not None: deadline = getattr(time,'monotonic',time.time)() + timeout while True: ready = selector.select(timeout) if ready: return [key.fileobj for (key, events) in ready] else: if timeout is not None: timeout = deadline - getattr(time,'monotonic',time.time)() if timeout < 0: return ready # # Make connection and socket objects sharable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . 
import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(sock.detach(), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): handle = dh.detach() return PipeConnection(handle, readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): fd = df.detach() return Connection(fd, readable, writable) reduction.register(Connection, reduce_connection) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/context.py000066400000000000000000000265321455552142400254220ustar00rootroot00000000000000import os import sys import threading from . import process from . import reduction __all__ = () # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError current_process = staticmethod(process.current_process) parent_process = staticmethod(process.parent_process) active_children = staticmethod(process.active_children) def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, lock=True): '''Returns a synchronized shared object''' from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *, lock=True): '''Returns a synchronized shared array''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . 
import connection def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. ''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) from None ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property def reducer(self): '''Controls how objects will be reduced to a form that can be shared with other processes.''' return globals().get('reduction') @reducer.setter def reducer(self, reduction): globals()['reduction'] = reduction def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) @staticmethod def _after_fork(): return _default_context.get_context().Process._after_fork() class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super().get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): if sys.platform == 'win32': return ['spawn'] else: methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] if reduction.HAVE_SEND_HANDLE: methods.append('forkserver') return methods # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): if not reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not 
available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } if sys.platform == 'darwin': # bpo-33725: running arbitrary code after fork() is no longer reliable # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn else: _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) @staticmethod def _after_fork(): # process is spawned, nothing to do pass class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/dummy/000077500000000000000000000000001455552142400245075ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/dummy/__init__.py000066400000000000000000000057651455552142400266350ustar00rootroot00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
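# --- Editorial illustration (not part of the archived source) -----------
# Sketch of the context machinery implemented in context.py above: a
# library can request a dedicated context instead of mutating the global
# default start method. The helper names below are illustrative only.
def _square(x):
    return x * x

def _example_contexts():
    import multiprocess as mp

    ctx = mp.get_context('spawn')             # explicit start method; leaves the default untouched
    with ctx.Pool(2) as pool:
        print(pool.map(_square, range(5)))    # -> [0, 1, 4, 9, 16]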
# __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): if self._parent is not current_process(): raise RuntimeError( "Parent is {0!r} but current_process is {1!r}".format( self._parent, current_process())) self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/dummy/connection.py000066400000000000000000000030761455552142400272260ustar00rootroot00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
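# --- Editorial illustration (not part of the archived source) -----------
# multiprocess.dummy (the module above) mirrors the multiprocessing API
# but is backed by threads, which makes it convenient for I/O-bound work
# or for debugging without spawning real subprocesses.
def _example_dummy_pool():
    from multiprocess.dummy import Pool as ThreadPool

    with ThreadPool(4) as pool:               # 4 worker threads, not processes
        lengths = pool.map(len, ['a', 'bb', 'ccc'])
    print(lengths)                            # -> [1, 2, 3]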
# __all__ = [ 'Client', 'Listener', 'Pipe' ] from queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None @property def address(self): return self._backlog_queue def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False with self._in.not_empty: self._in.not_empty.wait(timeout) return self._in.qsize() > 0 def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/forkserver.py000066400000000000000000000275521455552142400261310ustar00rootroot00000000000000import errno import os import selectors import signal import socket import struct import sys import threading import warnings from . import connection from . import process from .context import reduction from . import resource_tracker from . import spawn from . import util __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def _stop(self): # Method used by unit tests to stop the server with self._lock: self._stop_unlocked() def _stop_unlocked(self): if self._forkserver_pid is None: return # close the "alive" file descriptor asks the server to stop os.close(self._forkserver_alive_fd) self._forkserver_alive_fd = None os.waitpid(self._forkserver_pid, 0) self._forkserver_pid = None if not util.is_abstract_socket_namespace(self._forkserver_address): os.unlink(self._forkserver_address) self._forkserver_address = None def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in self._preload_modules): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. 
''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, resource_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. ''' with self._lock: resource_tracker.ensure_running() if self._forkserver_pid is not None: # forkserver was launched before, is it still running? pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) if not pid: # still alive return # dead, launch it again os.close(self._forkserver_alive_fd) self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None cmd = ('from multiprocess.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = {x: y for x, y in data.items() if x in desired_keys} else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) if not util.is_abstract_socket_namespace(address): os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] pid = util.spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w self._forkserver_pid = pid # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass util._close_stdin() sig_r, sig_w = os.pipe() os.set_blocking(sig_r, False) os.set_blocking(sig_w, False) def sigchld_handler(*_unused): # Dummy signal handler, doesn't do anything pass handlers = { # unblocking SIGCHLD allows the wakeup fd to notify our event loop signal.SIGCHLD: sigchld_handler, # protect the process from ^C signal.SIGINT: signal.SIG_IGN, } old_handlers = {sig: signal.signal(sig, val) for (sig, val) in handlers.items()} # calling os.write() in the Python signal handler is racy signal.set_wakeup_fd(sig_w) # map child pids to client fds pid_to_fd = {} with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) selector.register(sig_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: 
# EOF because no more client processes left assert os.read(alive_r, 1) == b'', "Not at EOF?" raise SystemExit if sig_r in rfds: # Got SIGCHLD os.read(sig_r, 65536) # exhaust while True: # Scan for child processes try: pid, sts = os.waitpid(-1, os.WNOHANG) except ChildProcessError: break if pid == 0: break child_w = pid_to_fd.pop(pid, None) if child_w is not None: returncode = os.waitstatus_to_exitcode(sts) # Send exit code to client process try: write_signed(child_w, returncode) except BrokenPipeError: # client vanished pass os.close(child_w) else: # This shouldn't happen really warnings.warn('forkserver: waitpid returned ' 'unexpected pid %d' % pid) if listener in rfds: # Incoming fork request with listener.accept()[0] as s: # Receive fds from client fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) if len(fds) > MAXFDS_TO_SEND: raise RuntimeError( "Too many ({0:n}) fds to send".format( len(fds))) child_r, child_w, *fds = fds s.close() pid = os.fork() if pid == 0: # Child code = 1 try: listener.close() selector.close() unused_fds = [alive_r, child_w, sig_r, sig_w] unused_fds.extend(pid_to_fd.values()) code = _serve_one(child_r, fds, unused_fds, old_handlers) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) else: # Send pid to client process try: write_signed(child_w, pid) except BrokenPipeError: # client vanished pass pid_to_fd[pid] = child_w os.close(child_r) for fd in fds: os.close(fd) except OSError as e: if e.errno != errno.ECONNABORTED: raise def _serve_one(child_r, fds, unused_fds, handlers): # close unnecessary stuff and reset signal handlers signal.set_wakeup_fd(-1) for sig, val in handlers.items(): signal.signal(sig, val) for fd in unused_fds: os.close(fd) (_forkserver._forkserver_alive_fd, resource_tracker._resource_tracker._fd, *_forkserver._inherited_fds) = fds # Run process object received over pipe parent_sentinel = os.dup(child_r) code = spawn._main(child_r, parent_sentinel) return code # # Read and write signed numbers # def read_signed(fd): data = b'' length = SIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return SIGNED_STRUCT.unpack(data)[0] def write_signed(fd, n): msg = SIGNED_STRUCT.pack(n) while msg: nbytes = os.write(fd, msg) if nbytes == 0: raise RuntimeError('should not get here') msg = msg[nbytes:] # # # _forkserver = ForkServer() ensure_running = _forkserver.ensure_running get_inherited_fds = _forkserver.get_inherited_fds connect_to_new_process = _forkserver.connect_to_new_process set_forkserver_preload = _forkserver.set_forkserver_preload uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/heap.py000066400000000000000000000265521455552142400246550ustar00rootroot00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import bisect from collections import defaultdict import mmap import os import sys import tempfile import threading from .context import reduction, assert_spawning from . import util __all__ = ['BufferWrapper'] # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': import _winapi class Arena(object): """ A shared memory area backed by anonymous memory (Windows). 
""" _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if _winapi.GetLastError() == 0: break # We have reopened a preexisting mmap. buf.close() else: raise FileExistsError('Cannot find name for new mmap') self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state # Reopen existing mmap self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. See issue 23060 #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS else: class Arena(object): """ A shared memory area backed by a temporary file (POSIX). """ if sys.platform == 'linux': _dir_candidates = ['/dev/shm'] else: _dir_candidates = [] def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: # Arena is created anew (if fd != -1, it means we're coming # from rebuild_arena() below) self.fd, name = tempfile.mkstemp( prefix='pym-%d-'%os.getpid(), dir=self._choose_dir(size)) os.unlink(name) util.Finalize(self, os.close, (self.fd,)) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def _choose_dir(self, size): # Choose a non-storage backed directory if possible, # to improve performance for d in self._dir_candidates: st = os.statvfs(d) if st.f_bavail * st.f_frsize >= size: # enough free space? return d return util.get_temp_dir() def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because ' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): # Minimum malloc() alignment _alignment = 8 _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() # Current arena allocation size self._size = size # A sorted list of available block sizes in arenas self._lengths = [] # Free block management: # - map each block size to a list of `(Arena, start, stop)` blocks self._len_to_seq = {} # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block # starting at that offset self._start_to_block = {} # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block # ending at that offset self._stop_to_block = {} # Map arenas to their `(Arena, start, stop)` blocks in use self._allocated_blocks = defaultdict(set) self._arenas = [] # List of pending blocks to free - see comment in free() below self._pending_free_blocks = [] # Statistics self._n_mallocs = 0 self._n_frees = 0 @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _new_arena(self, size): # Create a new arena with at least the given *size* length = self._roundup(max(self._size, size), mmap.PAGESIZE) # We carve larger and larger arenas, for efficiency, until we # reach a large-ish size (roughly L3 cache-sized) if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) def 
_discard_arena(self, arena): # Possibly delete the given (unused) arena length = arena.size # Reusing an existing arena is faster than creating a new one, so # we only reclaim space if it's large enough. if length < self._DISCARD_FREE_SPACE_LARGER_THAN: return blocks = self._allocated_blocks.pop(arena) assert not blocks del self._start_to_block[(arena, 0)] del self._stop_to_block[(arena, length)] self._arenas.remove(arena) seq = self._len_to_seq[length] seq.remove((arena, 0, length)) if not seq: del self._len_to_seq[length] self._lengths.remove(length) def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): return self._new_arena(size) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _add_free_block(self, block): # make block available and try to merge with its neighbours in the arena (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _remove_allocated_block(self, block): arena, start, stop = block blocks = self._allocated_blocks[arena] blocks.remove((start, stop)) if not blocks: # Arena is entirely free, discard it from this process self._discard_arena(arena) def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held. while True: try: block = self._pending_free_blocks.pop() except IndexError: break self._add_free_block(block) self._remove_allocated_block(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
if os.getpid() != self._lastpid: raise ValueError( "My pid ({0:n}) is not last pid {1:n}".format( os.getpid(),self._lastpid)) if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._n_frees += 1 self._free_pending_blocks() self._add_free_block(block) self._remove_allocated_block(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._n_mallocs += 1 # allow pending blocks to be marked available self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) real_stop = start + size if real_stop < stop: # if the returned block is larger than necessary, mark # the remainder available self._add_free_block((arena, real_stop, stop)) self._allocated_blocks[arena].add((start, real_stop)) return (arena, start, real_stop) # # Class wrapping a block allocated out of a Heap -- can be inherited by child process # class BufferWrapper(object): _heap = Heap() def __init__(self, size): if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start+size] uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/managers.py000066400000000000000000001347121455552142400255330ustar00rootroot00000000000000# # Module providing manager classes for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] # # Imports # import sys import threading import signal import array import queue import time import types import os from os import getpid from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen, ProcessError from . import pool from . import process from . import util from . import get_context try: from . 
import shared_memory except ImportError: HAS_SHMEM = False else: HAS_SHMEM = True __all__.append('SharedMemoryManager') # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] if view_types[0] is not list: # only needed in Py3.0 def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) # # Type for identifying shared objects # class Token(object): ''' Type to uniquely identify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): if not isinstance(result, str): raise TypeError( "Result {0!r} (kind '{1}') type is {2}, not str".format( result, kind, type(result))) if kind == '#UNSERIALIZABLE': return RemoteError('Unserializable message: %s\n' % result) else: return RemoteError(result) else: return ValueError('Unrecognized message type {!r}'.format(kind)) class RemoteError(Exception): def __str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): if not isinstance(authkey, bytes): raise TypeError( "Authkey {0!r} is type {1!s}, not bytes".format( authkey, type(authkey))) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if 
sys.stdout != sys.__stdout__: # what about stderr? util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def _handle_request(self, c): request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) def handle_request(self, conn): ''' Handle a new connection ''' try: self._handle_request(conn) except SystemExit: # Server.serve_client() calls sys.exit(0) on EOF pass finally: conn.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = \ self.id_to_local_proxy_obj[ident] except KeyError: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' # Perhaps include debug info about 'c'? 
with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' return len(self.id_to_refcount) def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(self, c, typeid, /, *args, **kwds): ''' Create a new shared object and return its id ''' with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: if kwds or (len(args) != 1): raise ValueError( "Without callable, must have one non-keyword argument") obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: if not isinstance(method_to_typeid, dict): raise TypeError( "Method_to_typeid {0!r}: type {1!s}, not dict".format( method_to_typeid, type(method_to_typeid))) exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: # If no external references exist but an internal (to the # manager) still does and a new external reference is created # from it, restore the manager's tracking of it from the # previously stashed internal ref. if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = \ self.id_to_local_proxy_obj[ident] obj, exposed, gettypeid = self.id_to_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if ident not in self.id_to_refcount and \ ident in self.id_to_local_proxy_obj: util.debug('Server DECREF skipping %r', ident) return with self.mutex: if self.id_to_refcount[ident] <= 0: raise AssertionError( "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( ident, self.id_to_obj[ident], self.id_to_refcount[ident])) self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: # Two-step process in case the object turns out to contain other # proxy objects (e.g. a managed list of managed lists). # Otherwise, deleting self.id_to_obj[ident] would trigger the # deleting of the stored value (another managed object) which would # in turn attempt to acquire the mutex that is already held here. 
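# --- Editorial illustration (not part of the archived source) -----------
# The create/incref/decref handlers above are exercised indirectly by
# proxy objects. A minimal client-side sketch using the stock manager:
def _example_sync_manager():
    import multiprocess as mp

    with mp.Manager() as manager:             # starts a Server like the one above
        shared = manager.list()               # Server.create() builds the referent, returns a proxy
        shared.append('hello')                # each call is dispatched via Server.serve_client()
        print(list(shared))                   # -> ['hello']
    # leaving the block shuts the server down and drops the references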
self.id_to_obj[ident] = (None, (), None) # thread-safe util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { #XXX: register dill? 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' # bpo-36368: protect server process from KeyboardInterrupt signals signal.signal(signal.SIGINT, signal.SIG_IGN) if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', 
server.address) server.serve_forever() def _create(self, typeid, /, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() if self._state.value != State.STARTED: if self._state.value == State.INITIAL: raise ProcessError("Unable to start server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass @property def address(self): return self._address @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): # isinstance? 
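# --- Editorial illustration (not part of the archived source) -----------
# Typical use of BaseManager.register() as defined in this class: expose
# a user-defined type through a manager server. The _Counter class and
# the 'Counter' typeid are illustrative assumptions.
class _Counter:
    def __init__(self):
        self._n = 0
    def bump(self):
        self._n += 1
        return self._n

def _example_custom_manager():
    from multiprocess.managers import BaseManager

    class MyManager(BaseManager):
        pass

    MyManager.register('Counter', _Counter)   # generates a MyManager.Counter() factory method
    with MyManager() as manager:              # __enter__ starts the server process
        counter = manager.Counter()           # proxy to a _Counter living in the server
        counter.bump()
        print(counter.bump())                 # -> 2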
assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, /, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] # Should be set to True only when a proxy object is being created # on the manager server; primary use case: nested proxy objects. # RebuildProxy detects when a proxy is being created on the manager # and sets this value appropriately. 
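# --- Editorial illustration (not part of the archived source) -----------
# Every method on a proxy type generated by MakeProxyType() further below
# funnels through BaseProxy._callmethod(); calling it directly behaves the
# same as the generated method:
def _example_callmethod():
    import multiprocess as mp

    with mp.Manager() as manager:
        proxy = manager.list()
        proxy._callmethod('append', (42,))    # equivalent to proxy.append(42)
        print(proxy._callmethod('__len__'))   # -> 1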
self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... 
decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = \ server.id_to_obj[token.id] incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, /, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref, manager_owned=manager_owned) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = 
typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): _exposed_ = ('__next__', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self, n=1): return self._callmethod('notify', (n,)) def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) __class_getitem__ = 
classmethod(types.GenericAlias) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) DictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__' )) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocess.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) # # Definition of SharedMemoryManager and SharedMemoryServer # if HAS_SHMEM: class _SharedMemoryTracker: "Manages one or more shared memory segments." def __init__(self, name, segment_names=[]): self.shared_memory_context_name = name self.segment_names = segment_names def register_segment(self, segment_name): "Adds the supplied shared memory block name to tracker." 
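# Hedged usage sketch for the SyncManager registrations above (illustrative
# only, not part of the upstream source):
#
#   import multiprocess as mp
#
#   if __name__ == '__main__':
#       with mp.Manager() as manager:        # Manager() returns a started SyncManager
#           d = manager.dict()               # DictProxy
#           l = manager.list(range(5))       # ListProxy
#           lock = manager.Lock()            # AcquirerProxy
#           with lock:
#               d['count'] = len(l)
#           print(dict(d), list(l))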
util.debug(f"Register segment {segment_name!r} in pid {getpid()}") self.segment_names.append(segment_name) def destroy_segment(self, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the list of blocks being tracked.""" util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") self.segment_names.remove(segment_name) segment = shared_memory.SharedMemory(segment_name) segment.close() segment.unlink() def unlink(self): "Calls destroy_segment() on all tracked shared memory blocks." for segment_name in self.segment_names[:]: self.destroy_segment(segment_name) def __del__(self): util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") self.unlink() def __getstate__(self): return (self.shared_memory_context_name, self.segment_names) def __setstate__(self, state): self.__init__(*state) class SharedMemoryServer(Server): public = Server.public + \ ['track_segment', 'release_segment', 'list_segments'] def __init__(self, *args, **kwargs): Server.__init__(self, *args, **kwargs) address = self.address # The address of Linux abstract namespaces can be bytes if isinstance(address, bytes): address = os.fsdecode(address) self.shared_memory_context = \ _SharedMemoryTracker(f"shm_{address}_{getpid()}") util.debug(f"SharedMemoryServer started by pid {getpid()}") def create(self, c, typeid, /, *args, **kwargs): """Create a new distributed-shared object (not backed by a shared memory block) and return its id to be used in a Proxy Object.""" # Unless set up as a shared proxy, don't make shared_memory_context # a standard part of kwargs. This makes things easier for supplying # simple functions. if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): kwargs['shared_memory_context'] = self.shared_memory_context return Server.create(self, c, typeid, *args, **kwargs) def shutdown(self, c): "Call unlink() on all tracked shared memory, terminate the Server." self.shared_memory_context.unlink() return Server.shutdown(self, c) def track_segment(self, c, segment_name): "Adds the supplied shared memory block name to Server's tracker." self.shared_memory_context.register_segment(segment_name) def release_segment(self, c, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the tracker instance inside the Server.""" self.shared_memory_context.destroy_segment(segment_name) def list_segments(self, c): """Returns a list of names of shared memory blocks that the Server is currently tracking.""" return self.shared_memory_context.segment_names class SharedMemoryManager(BaseManager): """Like SyncManager but uses SharedMemoryServer instead of Server. It provides methods for creating and returning SharedMemory instances and for creating a list-like object (ShareableList) backed by shared memory. It also provides methods that create and return Proxy Objects that support synchronization across processes (i.e. multi-process-safe locks and semaphores). """ _Server = SharedMemoryServer def __init__(self, *args, **kwargs): if os.name == "posix": # bpo-36867: Ensure the resource_tracker is running before # launching the manager process, so that concurrent # shared_memory manipulation both in the manager and in the # current process does not create two resource_tracker # processes. from . 
import resource_tracker resource_tracker.ensure_running() BaseManager.__init__(self, *args, **kwargs) util.debug(f"{self.__class__.__name__} created by pid {getpid()}") def __del__(self): util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") pass def get_server(self): 'Better than monkeypatching for now; merge into Server ultimately' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started SharedMemoryServer") elif self._state.value == State.SHUTDOWN: raise ProcessError("SharedMemoryManager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self._Server(self._registry, self._address, self._authkey, self._serializer) def SharedMemory(self, size): """Returns a new SharedMemory instance with the specified size in bytes, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sms = shared_memory.SharedMemory(None, create=True, size=size) try: dispatch(conn, None, 'track_segment', (sms.name,)) except BaseException as e: sms.unlink() raise e return sms def ShareableList(self, sequence): """Returns a new ShareableList instance populated with the values from the input sequence, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sl = shared_memory.ShareableList(sequence) try: dispatch(conn, None, 'track_segment', (sl.shm.name,)) except BaseException as e: sl.shm.unlink() raise e return sl uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/pool.py000066400000000000000000000777671455552142400247270ustar00rootroot00000000000000# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import collections import itertools import os import queue import threading import time import traceback import types import warnings # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError from .connection import wait # # Constants representing the state of a pool # INIT = "INIT" RUN = "RUN" CLOSE = "CLOSE" TERMINATE = "TERMINATE" # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): if (maxtasks is not None) and not (isinstance(maxtasks, int) and maxtasks >= 1): raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception and func is not _helper_reraises_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 util.debug('worker exiting after %d tasks' % completed) def _helper_reraises_exception(ex): 'Pickle-able helper function for use by _guarded_task_generation.' raise ex # # Class representing a process pool # class _PoolCache(dict): """ Class that implements a cache for the Pool class that will notify the pool management threads every time the cache is emptied. The notification is done by the use of a queue that is provided when instantiating the cache. """ def __init__(self, /, *args, notifier=None, **kwds): self.notifier = notifier super().__init__(*args, **kwds) def __delitem__(self, item): super().__delitem__(item) # Notify that the cache is empty. This is important because the # pool keeps maintaining workers until the cache gets drained. This # eliminates a race condition in which a task is finished after the # the pool's _handle_workers method has enter another iteration of the # loop. In this situation, the only event that can wake up the pool # is the cache to be emptied (no more tasks available). if not self: self.notifier.put(None) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' _wrap_exception = True @staticmethod def Process(ctx, *args, **kwds): return ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): # Attributes initialized early to make sure that they exist in # __del__() if __init__() raises an exception self._pool = [] self._state = INIT self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.SimpleQueue() # The _change_notifier queue exist to wake up self._handle_workers() # when the cache (self._cache) is empty or when there is a change in # the _state variable of the thread that runs _handle_workers. 
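# Minimal, hedged usage sketch for the Pool class being initialised here
# (illustrative only; square() is a made-up target function):
#
#   import multiprocess as mp
#
#   def square(x):
#       return x * x
#
#   if __name__ == '__main__':
#       with mp.Pool(processes=4) as pool:
#           print(pool.map(square, range(10)))         # [0, 1, 4, ..., 81]
#           async_res = pool.apply_async(square, (7,))
#           print(async_res.get(timeout=5))            # 49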
self._change_notifier = self._ctx.SimpleQueue() self._cache = _PoolCache(notifier=self._change_notifier) self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if maxtasksperchild is not None: if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: raise ValueError("maxtasksperchild must be a positive int or None") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes try: self._repopulate_pool() except Exception: for p in self._pool: if p.exitcode is None: p.terminate() for p in self._pool: p.join() raise sentinels = self._get_sentinels() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self._cache, self._taskqueue, self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception, sentinels, self._change_notifier) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._change_notifier, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) self._state = RUN # Copy globals as function locals to make sure that they are available # during Python shutdown when the Pool is destroyed. def __del__(self, _warn=warnings.warn, RUN=RUN): if self._state == RUN: _warn(f"unclosed running multiprocessing pool {self!r}", ResourceWarning, source=self) if getattr(self, '_change_notifier', None) is not None: self._change_notifier.put(None) def __repr__(self): cls = self.__class__ return (f'<{cls.__module__}.{cls.__qualname__} ' f'state={self._state} ' f'pool_size={len(self._pool)}>') def _get_sentinels(self): task_queue_sentinels = [self._outqueue._reader] self_notifier_sentinels = [self._change_notifier._reader] return [*task_queue_sentinels, *self_notifier_sentinels] @staticmethod def _get_worker_sentinels(workers): return [worker.sentinel for worker in workers if hasattr(worker, "sentinel")] @staticmethod def _join_exited_workers(pool): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. 
""" cleaned = False for i in reversed(range(len(pool))): worker = pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del pool[i] return cleaned def _repopulate_pool(self): return self._repopulate_pool_static(self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) @staticmethod def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(processes - len(pool)): w = Process(ctx, target=worker, args=(inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() pool.append(w) util.debug('added worker') @staticmethod def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Clean up any exited workers and start replacements for them. """ if Pool._join_exited_workers(pool): Pool._repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _check_running(self): if self._state != RUN: raise ValueError("Pool not running") def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. Pool must be running. ''' return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def _guarded_task_generation(self, result_job, func, iterable): '''Provides a generator of tasks for imap and imap_unordered with appropriate handling for iterables which throw exceptions during iteration.''' try: i = -1 for i, x in enumerate(iterable): yield (result_job, i, func, (x,), {}) except Exception as e: yield (result_job, i+1, _helper_reraises_exception, (e,), {}) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. 
''' self._check_running() if chunksize == 1: result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0:n}".format( chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. ''' self._check_running() if chunksize == 1: result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0!r}".format(chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' self._check_running() result = ApplyResult(self, callback, error_callback) self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' self._check_running() if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapper, task_batches), None ) ) return result @staticmethod def _wait_for_updates(sentinels, change_notifier, timeout=None): wait(sentinels, timeout=timeout) while not change_notifier.empty(): change_notifier.get() @classmethod def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception, sentinels, change_notifier): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
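# Worked example of the chunksize heuristic in _map_async() above (derived
# from the code, not part of the upstream source): with no explicit chunksize,
# divmod(len(iterable), len(pool) * 4) is used and rounded up, e.g.
#
#   len(iterable) = 100, 4 worker processes
#   chunksize, extra = divmod(100, 4 * 4)   ->  (6, 4)
#   extra != 0, so chunksize becomes 7      ->  ceil(100 / 16)
#
# i.e. the 100 items are sent as batches of at most 7, roughly four batches
# per worker.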
while thread._state == RUN or (cache and thread._state != TERMINATE): cls._maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] cls._wait_for_updates(current_sentinels, change_notifier) # send sentinel to stop workers taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None try: # iterating taskseq cannot fail for task in taskseq: if thread._state != RUN: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, idx = task[:2] try: cache[job]._set(idx, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') idx = task[1] if task else -1 set_length(idx + 1) continue break finally: task = taskseq = job = None else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state != RUN: assert thread._state == TERMINATE, "Thread not in TERMINATE" util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
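# Summary of the record formats flowing through the handler threads above
# (derived from worker() and _guarded_task_generation(); added as a reading
# aid, not part of the upstream source):
#
#   task   = (job, i, func, args, kwds)      # put on the in-queue by _handle_tasks()
#   result = (job, i, (success, value))      # put on the out-queue by worker()
#
# _handle_results() uses `job` to look up the ApplyResult/MapResult/IMapIterator
# in the cache and calls its _set(i, (success, value)) method.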
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE self._change_notifier.put(None) def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') if self._state == RUN: raise ValueError("Pool is still running") elif self._state not in (CLOSE, TERMINATE): raise ValueError("In unknown state") self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') # Notify that the worker_handler state has been changed so the # _handle_workers loop can be unblocked (and exited) in order to # send the finalization sentinel all the workers. worker_handler._state = TERMINATE change_notifier.put(None) task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) if (not result_handler.is_alive()) and (len(cache) != 0): raise AssertionError( "Cannot have cache with result_hander not alive") result_handler._state = TERMINATE change_notifier.put(None) outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
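# Hedged sketch of the two shutdown paths that end up in _terminate_pool()
# above (illustrative only, not part of the upstream source):
#
#   pool.close(); pool.join()    # graceful: let queued tasks finish, then reap workers
#   pool.terminate()             # abrupt: stop workers without draining the task queue
#
# Note that the context-manager form `with Pool() as p: ...` calls terminate()
# on exit, so results still pending when the block ends may be lost.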
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): self._check_running() return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, pool, callback, error_callback): self._pool = pool self._event = threading.Event() self._job = next(job_counter) self._cache = pool._cache self._callback = callback self._error_callback = error_callback self._cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): if not self.ready(): raise ValueError("{0!r} not ready".format(self)) return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] self._pool = None __class_getitem__ = classmethod(types.GenericAlias) AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, pool, chunksize, length, callback, error_callback): ApplyResult.__init__(self, pool, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del self._cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): self._number_left -= 1 success, result = success_result if success and self._success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() self._pool = None else: if not success and self._success: # only store first exception self._success = False self._value = result if self._number_left == 0: # only consider the result ready once all jobs are done if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() self._pool = None # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, pool): self._pool = pool self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = pool._cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} self._cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise 
StopIteration from None raise TimeoutError from None success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] self._pool = None def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] self._pool = None # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] self._pool = None # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(ctx, *args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.SimpleQueue() self._outqueue = queue.SimpleQueue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _get_sentinels(self): return [self._change_notifier._reader] @staticmethod def _get_worker_sentinels(workers): return [] @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # drain inqueue, and put sentinels at its head to make workers finish try: while True: inqueue.get(block=False) except queue.Empty: pass for i in range(size): inqueue.put(None) def _wait_for_updates(self, sentinels, change_notifier, timeout): time.sleep(timeout) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/popen_fork.py000066400000000000000000000045061455552142400260750ustar00rootroot00000000000000import os import signal from . import util __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' def __init__(self, process_obj): util._flush_std_streams() self.returncode = None self.finalizer = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except OSError: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: self.returncode = os.waitstatus_to_exitcode(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from multiprocess.connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
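# Hedged aside on the sentinel used by wait() above: it is the read end of the
# pipe created in _launch() below, which becomes readable (EOF) once the child
# exits, so a timed wait needs no busy-polling. Illustrative use with an
# already-started Process object `proc` (a made-up name):
#
#   from multiprocess.connection import wait
#   if wait([proc.sentinel], timeout=2.0):
#       print('child finished, exitcode', proc.exitcode)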
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def _send_signal(self, sig): if self.returncode is None: try: os.kill(self.pid, sig) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def terminate(self): self._send_signal(signal.SIGTERM) def kill(self): self._send_signal(signal.SIGKILL) def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) os.close(parent_w) code = process_obj._bootstrap(parent_sentinel=child_r) finally: os._exit(code) else: os.close(child_w) os.close(child_r) self.finalizer = util.Finalize(self, util.close_fds, (parent_r, parent_w,)) self.sentinel = parent_r def close(self): if self.finalizer is not None: self.finalizer() uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/popen_forkserver.py000066400000000000000000000042631455552142400273240ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen if not reduction.HAVE_SEND_HANDLE: raise ImportError('No support for sending fds between processes') from . import forkserver from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) # Keep a duplicate of the data pipe's write end as a sentinel of the # parent process used by the child process. _parent_w = os.dup(w) self.finalizer = util.Finalize(self, util.close_fds, (_parent_w, self.sentinel)) with open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_signed(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from multiprocess.connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_signed(self.sentinel) except (OSError, EOFError): # This should not happen usually, but perhaps the forkserver # process itself got killed self.returncode = 255 return self.returncode uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/popen_spawn_posix.py000066400000000000000000000037551455552142400275130ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): from . 
import resource_tracker tracker_fd = resource_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds) self.sentinel = parent_r with open(parent_w, 'wb', closefd=False) as f: f.write(fp.getbuffer()) finally: fds_to_close = [] for fd in (parent_r, parent_w): if fd is not None: fds_to_close.append(fd) self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) for fd in (child_r, child_w): if fd is not None: os.close(fd) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/popen_spawn_win32.py000066400000000000000000000076531455552142400273140ustar00rootroot00000000000000import os import msvcrt import signal import sys import _winapi from .context import reduction, get_spawning_popen, set_spawning_popen from . import spawn from . import util __all__ = ['Popen'] # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") def _path_eq(p1, p2): return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) WINENV = not _path_eq(sys.executable, sys._base_executable) def _close_handles(*handles): for handle in handles: _winapi.CloseHandle(handle) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' def __init__(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be duplicated by the child process # -- see spawn_main() in spawn.py. # # bpo-33929: Previously, the read end of pipe was "stolen" by the child # process, but it leaked a handle if the child process had been # terminated before it could steal the handle from the parent process. rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) cmd = ' '.join('"%s"' % x for x in cmd) python_exe = spawn.get_executable() # bpo-35797: When running in a venv, we bypass the redirect # executor and launch our base Python. 
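# Hedged aside (illustrative only, not part of the upstream source): callers
# normally reach this Popen implementation by selecting the 'spawn' start
# method rather than importing it directly, e.g.
#
#   import multiprocess as mp
#
#   if __name__ == '__main__':
#       ctx = mp.get_context('spawn')    # 'spawn' is the default on Windows and macOS
#       p = ctx.Process(target=print, args=('hello',))
#       p.start()
#       p.join()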
if WINENV and _path_eq(python_exe, sys.executable): python_exe = sys._base_executable env = os.environ.copy() env["__PYVENV_LAUNCHER__"] = sys.executable else: env = None with open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = _winapi.CreateProcess( python_exe, cmd, None, None, False, 0, env, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) self.finalizer = util.Finalize(self, _close_handles, (self.sentinel, int(rhandle))) # send information to child set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: set_spawning_popen(None) def duplicate_for_child(self, handle): assert self is get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except OSError: if self.wait(timeout=1.0) is None: raise kill = terminate def close(self): self.finalizer() uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/process.py000066400000000000000000000274631455552142400254200ustar00rootroot00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
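# Hedged usage sketch for the BaseProcess/current_process API defined in this
# module (illustrative only; work() is a made-up target function):
#
#   import multiprocess as mp
#
#   def work(n):
#       print(mp.current_process().name, 'got', n)
#
#   if __name__ == '__main__':
#       p = mp.Process(target=work, args=(42,), name='worker-1')
#       p.start()
#       p.join()
#       print(p.exitcode, mp.active_children())   # 0, [] once the child is joined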
# __all__ = ['BaseProcess', 'current_process', 'active_children', 'parent_process'] # # Imports # import os import sys import signal import itertools import threading from _weakrefset import WeakSet # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_children) def parent_process(): ''' Return process object representing the parent process ''' return _parent_process # # # def _cleanup(): # check for processes which have finished for p in list(_children): if p._popen.poll() is not None: _children.discard(p) # # The `Process` class # class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analogous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count,) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._parent_name = _current_process.name self._popen = None self._closed = False self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) if daemon is not None: self.daemon = daemon _dangling.add(self) def _check_closed(self): if self._closed: raise ValueError("process object is closed") def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' self._check_closed() assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._check_closed() self._popen.terminate() def kill(self): ''' Terminate process; sends SIGKILL signal or uses TerminateProcess() ''' self._check_closed() self._popen.kill() def join(self, timeout=None): ''' Wait until child process terminates ''' self._check_closed() assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) def is_alive(self): ''' Return whether process is alive ''' self._check_closed() if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _children.discard(self) return False def close(self): ''' Close the Process object. This method releases resources held by the Process object. 
It is an error to call this method if the child process is still running. ''' if self._popen is not None: if self._popen.poll() is None: raise ValueError("Cannot close a process while it is still running. " "You should first call join() or terminate().") self._popen.close() self._popen = None del self._sentinel _children.discard(self) self._closed = True @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, str), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' self._check_closed() if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' self._check_closed() if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None def __repr__(self): exitcode = None if self is _current_process: status = 'started' elif self._closed: status = 'closed' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: exitcode = self._popen.poll() if exitcode is not None: status = 'stopped' else: status = 'started' info = [type(self).__name__, 'name=%r' % self._name] if self._popen is not None: info.append('pid=%s' % self._popen.pid) info.append('parent=%s' % self._parent_pid) info.append(status) if exitcode is not None: exitcode = _exitcode_to_name.get(exitcode, exitcode) info.append('exitcode=%s' % exitcode) if self.daemon: info.append('daemon') return '<%s>' % ' '.join(info) ## def _bootstrap(self, parent_sentinel=None): from . 
import util, context global _current_process, _parent_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() util._close_stdin() old_process = _current_process _current_process = self _parent_process = _ParentProcess( self._parent_name, self._parent_pid, parent_sentinel) if threading._HAVE_THREAD_NATIVE_ID: threading.main_thread()._set_native_id() try: self._after_fork() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as e: if e.code is None: exitcode = 0 elif isinstance(e.code, int): exitcode = e.code else: sys.stderr.write(str(e.code) + '\n') exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: threading._shutdown() util.info('process exiting with exitcode %d' % exitcode) util._flush_std_streams() return exitcode @staticmethod def _after_fork(): from . import util util._finalizer_registry.clear() util._run_after_forkers() # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the parent process # class _ParentProcess(BaseProcess): def __init__(self, name, pid, sentinel): self._identity = () self._name = name self._pid = pid self._parent_pid = None self._popen = None self._closed = False self._sentinel = sentinel self._config = {} def is_alive(self): from multiprocess.connection import wait return not wait([self._sentinel], timeout=0) @property def ident(self): return self._pid def join(self, timeout=None): ''' Wait until parent process terminates ''' from multiprocess.connection import wait wait([self._sentinel], timeout=timeout) pid = ident # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._closed = False self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} # Note that some versions of FreeBSD only allow named # semaphores to have names of up to 14 characters. Therefore # we choose a short prefix. # # On MacOSX in a sandbox it may be necessary to use a # different prefix -- see #19478. # # Everything in self._config will be inherited by descendant # processes. def close(self): pass _parent_process = None _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in list(signal.__dict__.items()): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = f'-{name}' # For debug and leak testing _dangling = WeakSet() uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/queues.py000066400000000000000000000275531455552142400252510ustar00rootroot00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
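# Hedged usage sketch for the Queue type defined in this module (illustrative
# only; producer() is a made-up function):
#
#   import multiprocess as mp
#
#   def producer(q):
#       for i in range(3):
#           q.put(i)
#       q.put(None)                      # sentinel: tell the consumer to stop
#
#   if __name__ == '__main__':
#       q = mp.Queue()
#       p = mp.Process(target=producer, args=(q,))
#       p.start()
#       while (item := q.get()) is not None:
#           print('got', item)
#       p.join()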
# __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import types import weakref import errno from queue import Empty, Full try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import connection from . import context _ForkingPickler = context.reduction.ForkingPickler from .util import debug, info, Finalize, register_after_fork, is_exiting # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0, *, ctx): if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._reset() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._reset() def _after_fork(self): debug('Queue._after_fork()') self._reset(after_fork=True) def _reset(self, after_fork=False): if after_fork: self._notempty._at_fork_reinit() else: self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = getattr(time,'monotonic',time.time)() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - getattr(time,'monotonic',time.time)() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed, "Queue {0!r} not closed".format(self) if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except 
AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._reader.close, self._writer.close, self._ignore_epipe, self._on_queue_feeder_error, self._sem), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, reader_close, writer_close, ignore_epipe, onerror, queue_sem): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') reader_close() writer_close() return # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as e: if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: # Since the object has not been sent in the queue, we need # to decrease the size of the queue. The error acts as # if the object had been silently removed from the queue # and this step is necessary to have a properly working # queue. queue_sem.release() onerror(e, obj) @staticmethod def _on_queue_feeder_error(e, obj): """ Private API hook called when feeding data in the background thread raises an exception. For overriding by concurrent.futures. """ import traceback traceback.print_exc() _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. 
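# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): the task_done()/join() protocol
# that the note above describes, using the JoinableQueue class defined just
# below. Pairing every get() with exactly one task_done() lets join() return
# once the unfinished-task counter drops back to zero.

def _example_consumer(q):
    while True:
        item = q.get()
        if item is None:      # sentinel value: stop consuming
            q.task_done()
            break
        # ... process item ...
        q.task_done()         # one task_done() per finished item

if __name__ == '__main__':
    import multiprocess as mp
    q = mp.JoinableQueue()
    w = mp.Process(target=_example_consumer, args=(q,))
    w.start()
    for item in range(5):
        q.put(item)
    q.put(None)               # tell the consumer to exit
    q.join()                  # blocks until every put() has been task_done()'d
    w.join()
# ---------------------------------------------------------------------------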
# class JoinableQueue(Queue): def __init__(self, maxsize=0, *, ctx): Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty, self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self, *, ctx): self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() def close(self): self._reader.close() self._writer.close() def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._poll = self._reader.poll def get(self): with self._rlock: res = self._reader.recv_bytes() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def put(self, obj): # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(obj) else: with self._wlock: self._writer.send_bytes(obj) __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/reduction.py000066400000000000000000000226451455552142400257330ustar00rootroot00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from abc import ABCMeta import copyreg import functools import io import os try: import dill as pickle except ImportError: import pickle import socket import sys from . 
import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocess.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None, *args, **kwds): buf = io.BytesIO() cls(buf, protocol, *args, **kwds).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None, *args, **kwds): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol, *args, **kwds).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False, *, source_process=None): '''Duplicate a handle. (target_process is a handle not a pid!)''' current_process = _winapi.GetCurrentProcess() if source_process is None: source_process = current_process if target_process is None: target_process = current_process return _winapi.DuplicateHandle( source_process, handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
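# ---------------------------------------------------------------------------
# Illustrative aside (editorial addition, unrelated to the handle-duplication
# code surrounding it): how a custom reducer is hooked into ForkingPickler
# through the register() helper defined near the top of this module. The
# _Point / _reduce_point names are hypothetical, used only for this example.
#
#     class _Point:
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#     def _rebuild_point(x, y):
#         return _Point(x, y)
#
#     def _reduce_point(p):
#         # same (callable, args) shape a __reduce__ method would return
#         return _rebuild_point, (p.x, p.y)
#
#     register(_Point, _reduce_point)
#
# Every queue and pipe transfer in this package pickles through
# ForkingPickler, so the registered reducer takes effect for all of them.
# ---------------------------------------------------------------------------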
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) if len(a) % 256 != msg[0]: raise AssertionError( "Len is {0:n} but msg[0] is {1!r}".format( len(a), msg[0])) return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) class AbstractReducer(metaclass=ABCMeta): '''Abstract base class for use in implementing a Reduction class suitable for use in replacing the standard reduction mechanism used in multiprocess.''' ForkingPickler = ForkingPickler register = register dump = dump send_handle = send_handle recv_handle = recv_handle if sys.platform == 'win32': steal_handle = steal_handle duplicate = duplicate DupHandle = DupHandle else: sendfds = sendfds recvfds = recvfds DupFd = DupFd _reduce_method = _reduce_method _reduce_method_descriptor = _reduce_method_descriptor _rebuild_partial = _rebuild_partial _reduce_socket = _reduce_socket _rebuild_socket = _rebuild_socket def __init__(self, *args): register(type(_C().f), _reduce_method) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) register(functools.partial, _reduce_partial) register(socket.socket, _reduce_socket) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/resource_sharer.py000066400000000000000000000120141455552142400271170ustar00rootroot00000000000000# # We use a background thread for sharing fds on Unix, and for sharing sockets on # Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. # import os import signal import socket import sys import threading from . import process from .context import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. 
This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resources using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() self._lock._at_fork_reinit() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None, "Already have Listener" util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/resource_tracker.py000066400000000000000000000215401455552142400272720ustar00rootroot00000000000000############################################################################### # Server process to keep track of unlinked resources (like shared memory # segments, semaphores etc.) and clean them. # # On Unix we run a server process which keeps track of unlinked # resources. The server ignores SIGINT and SIGTERM and reads from a # pipe. 
Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining resource names. # # This is important because there may be system limits for such resources: for # instance, the system only supports a limited number of named semaphores, and # shared-memory segments live in the RAM. If a python process leaks such a # resource, this resource will not be removed till the next reboot. Without # this resource tracker process, "killall python" would probably leave unlinked # resources. import os import signal import sys import threading import warnings from . import spawn from . import util __all__ = ['ensure_running', 'register', 'unregister'] _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) _CLEANUP_FUNCS = { 'noop': lambda: None, } if os.name == 'posix': try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import _posixshmem # Use sem_unlink() to clean up named semaphores. # # sem_unlink() may be missing if the Python build process detected the # absence of POSIX named semaphores. In that case, no named semaphores were # ever opened, so no cleanup would be necessary. if hasattr(_multiprocessing, 'sem_unlink'): _CLEANUP_FUNCS.update({ 'semaphore': _multiprocessing.sem_unlink, }) _CLEANUP_FUNCS.update({ 'shared_memory': _posixshmem.shm_unlink, }) class ResourceTracker(object): def __init__(self): self._lock = threading.Lock() self._fd = None self._pid = None def _stop(self): with self._lock: if self._fd is None: # not running return # closing the "alive" file descriptor stops main() os.close(self._fd) self._fd = None os.waitpid(self._pid, 0) self._pid = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that resource tracker process is running. This can be run from any process. Usually a child process will use the resource created by its parent.''' with self._lock: if self._fd is not None: # resource tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) # Clean-up to avoid dangling processes. try: # _pid can be None if this process is a child from another # python process, which has started the resource_tracker. if self._pid is not None: os.waitpid(self._pid, 0) except ChildProcessError: # The resource_tracker has already been terminated. pass self._fd = None self._pid = None warnings.warn('resource_tracker: process died unexpectedly, ' 'relaunching. Some resources might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from multiprocess.resource_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] # bpo-33613: Register a signal mask that will block the signals. # This signal mask will be inherited by the child that is going # to be spawned and will protect the child from a race condition # that can make the child die before it registers signal handlers # for SIGINT and SIGTERM. The mask is unregistered after spawning # the child. 
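# ---------------------------------------------------------------------------
# Illustrative aside (editorial addition): the register()/unregister() calls
# exported at the bottom of this module are what SharedMemory and SemLock use
# to enlist named resources with the tracker described in the module comment.
# A hypothetical direct use (the name is a placeholder) would look like:
#
#     from multiprocess.resource_tracker import register, unregister
#     register('/psm_example', 'shared_memory')    # after creating the block
#     ...
#     unregister('/psm_example', 'shared_memory')  # after unlinking it
#
# If a process exits without unregistering, main() below unlinks the leftover
# name at shutdown and warns about the leaked resource.
# ---------------------------------------------------------------------------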
try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = util.spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r) def _check_alive(self): '''Check that the pipe has not been closed by sending a probe.''' try: # We cannot use send here as it calls ensure_running, creating # a cycle. os.write(self._fd, b'PROBE:0:noop\n') except OSError: return False else: return True def register(self, name, rtype): '''Register name of resource with resource tracker.''' self._send('REGISTER', name, rtype) def unregister(self, name, rtype): '''Unregister name of resource with resource tracker.''' self._send('UNREGISTER', name, rtype) def _send(self, cmd, name, rtype): self.ensure_running() msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') if len(msg) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('msg too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( nbytes, len(msg)) _resource_tracker = ResourceTracker() ensure_running = _resource_tracker.ensure_running register = _resource_tracker.register unregister = _resource_tracker.unregister getfd = _resource_tracker.getfd def main(fd): '''Run resource tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} try: # keep track of registered/unregistered resources with open(fd, 'rb') as f: for line in f: try: cmd, name, rtype = line.strip().decode('ascii').split(':') cleanup_func = _CLEANUP_FUNCS.get(rtype, None) if cleanup_func is None: raise ValueError( f'Cannot register {name} for automatic cleanup: ' f'unknown resource type {rtype}') if cmd == 'REGISTER': cache[rtype].add(name) elif cmd == 'UNREGISTER': cache[rtype].remove(name) elif cmd == 'PROBE': pass else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining resources for rtype, rtype_cache in cache.items(): if rtype_cache: try: warnings.warn('resource_tracker: There appear to be %d ' 'leaked %s objects to clean up at shutdown' % (len(rtype_cache), rtype)) except Exception: pass for name in rtype_cache: # For some reason the process which created and registered this # resource has failed to unregister it. Presumably it has # died. We therefore unlink it. try: try: _CLEANUP_FUNCS[rtype](name) except Exception as e: warnings.warn('resource_tracker: %r: %s' % (name, e)) finally: pass uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/shared_memory.py000066400000000000000000000440321455552142400265670ustar00rootroot00000000000000"""Provides shared memory for direct access across processes. The API of this package is currently provisional. Refer to the documentation for details. """ __all__ = [ 'SharedMemory', 'ShareableList' ] from functools import partial import mmap import os import errno import struct import secrets import types if os.name == "nt": import _winapi _USE_POSIX = False else: import _posixshmem _USE_POSIX = True from . 
import resource_tracker _O_CREX = os.O_CREAT | os.O_EXCL # FreeBSD (and perhaps other BSDs) limit names to 14 characters. _SHM_SAFE_NAME_LENGTH = 14 # Shared memory block name prefix if _USE_POSIX: _SHM_NAME_PREFIX = '/psm_' else: _SHM_NAME_PREFIX = 'wnsm_' def _make_filename(): "Create a random filename for the shared memory object." # number of random bytes to use for name nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 assert nbytes >= 2, '_SHM_NAME_PREFIX too long' name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) assert len(name) <= _SHM_SAFE_NAME_LENGTH return name class SharedMemory: """Creates a new shared memory block or attaches to an existing shared memory block. Every shared memory block is assigned a unique name. This enables one process to create a shared memory block with a particular name so that a different process can attach to that same shared memory block using that same name. As a resource for sharing data across processes, shared memory blocks may outlive the original process that created them. When one process no longer needs access to a shared memory block that might still be needed by other processes, the close() method should be called. When a shared memory block is no longer needed by any process, the unlink() method should be called to ensure proper cleanup.""" # Defaults; enables close() and unlink() to run without errors. _name = None _fd = -1 _mmap = None _buf = None _flags = os.O_RDWR _mode = 0o600 _prepend_leading_slash = True if _USE_POSIX else False def __init__(self, name=None, create=False, size=0): if not size >= 0: raise ValueError("'size' must be a positive integer") if create: self._flags = _O_CREX | os.O_RDWR if size == 0: raise ValueError("'size' must be a positive number different from zero") if name is None and not self._flags & os.O_EXCL: raise ValueError("'name' can only be None if create=True") if _USE_POSIX: # POSIX Shared Memory if name is None: while True: name = _make_filename() try: self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) except FileExistsError: continue self._name = name break else: name = "/" + name if self._prepend_leading_slash else name self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) self._name = name try: if create and size: os.ftruncate(self._fd, size) stats = os.fstat(self._fd) size = stats.st_size self._mmap = mmap.mmap(self._fd, size) except OSError: self.unlink() raise resource_tracker.register(self._name, "shared_memory") else: # Windows Named Shared Memory if create: while True: temp_name = _make_filename() if name is None else name # Create and reserve shared memory block with this name # until it can be attached to by mmap. h_map = _winapi.CreateFileMapping( _winapi.INVALID_HANDLE_VALUE, _winapi.NULL, _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF, size & 0xFFFFFFFF, temp_name ) try: last_error_code = _winapi.GetLastError() if last_error_code == _winapi.ERROR_ALREADY_EXISTS: if name is not None: raise FileExistsError( errno.EEXIST, os.strerror(errno.EEXIST), name, _winapi.ERROR_ALREADY_EXISTS ) else: continue self._mmap = mmap.mmap(-1, size, tagname=temp_name) finally: _winapi.CloseHandle(h_map) self._name = temp_name break else: self._name = name # Dynamically determine the existing named shared memory # block's size which is likely a multiple of mmap.PAGESIZE. 
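# ---------------------------------------------------------------------------
# Illustrative aside (editorial addition, dropped into the middle of __init__
# as a note only): the create/attach/close/unlink lifecycle described in the
# SharedMemory docstring above, using only the public API.
#
#     from multiprocess import shared_memory
#
#     shm = shared_memory.SharedMemory(create=True, size=16)
#     shm.buf[:5] = b'hello'                             # creator writes data
#
#     other = shared_memory.SharedMemory(name=shm.name)  # attach by name
#     assert bytes(other.buf[:5]) == b'hello'
#
#     other.close()    # every attaching process closes its own view
#     shm.close()
#     shm.unlink()     # exactly one process destroys the block
# ---------------------------------------------------------------------------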
h_map = _winapi.OpenFileMapping( _winapi.FILE_MAP_READ, False, name ) try: p_buf = _winapi.MapViewOfFile( h_map, _winapi.FILE_MAP_READ, 0, 0, 0 ) finally: _winapi.CloseHandle(h_map) try: size = _winapi.VirtualQuerySize(p_buf) finally: _winapi.UnmapViewOfFile(p_buf) self._mmap = mmap.mmap(-1, size, tagname=name) self._size = size self._buf = memoryview(self._mmap) def __del__(self): try: self.close() except OSError: pass def __reduce__(self): return ( self.__class__, ( self.name, False, self.size, ), ) def __repr__(self): return f'{self.__class__.__name__}({self.name!r}, size={self.size})' @property def buf(self): "A memoryview of contents of the shared memory block." return self._buf @property def name(self): "Unique name that identifies the shared memory block." reported_name = self._name if _USE_POSIX and self._prepend_leading_slash: if self._name.startswith("/"): reported_name = self._name[1:] return reported_name @property def size(self): "Size in bytes." return self._size def close(self): """Closes access to the shared memory from this instance but does not destroy the shared memory block.""" if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() self._mmap = None if _USE_POSIX and self._fd >= 0: os.close(self._fd) self._fd = -1 def unlink(self): """Requests that the underlying shared memory block be destroyed. In order to ensure proper cleanup of resources, unlink should be called once (and only once) across all processes which have access to the shared memory block.""" if _USE_POSIX and self._name: _posixshmem.shm_unlink(self._name) resource_tracker.unregister(self._name, "shared_memory") _encoding = "utf8" class ShareableList: """Pattern for a mutable list-like object shareable via a shared memory block. It differs from the built-in list type in that these lists can not change their overall length (i.e. no append, insert, etc.) 
Because values are packed into a memoryview as bytes, the struct packing format for any storable value must require no more than 8 characters to describe its format.""" # The shared memory area is organized as follows: # - 8 bytes: number of items (N) as a 64-bit integer # - (N + 1) * 8 bytes: offsets of each element from the start of the # data area # - K bytes: the data area storing item values (with encoding and size # depending on their respective types) # - N * 8 bytes: `struct` format string for each element # - N bytes: index into _back_transforms_mapping for each element # (for reconstructing the corresponding Python value) _types_mapping = { int: "q", float: "d", bool: "xxxxxxx?", str: "%ds", bytes: "%ds", None.__class__: "xxxxxx?x", } _alignment = 8 _back_transforms_mapping = { 0: lambda value: value, # int, float, bool 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str 2: lambda value: value.rstrip(b'\x00'), # bytes 3: lambda _value: None, # None } @staticmethod def _extract_recreation_code(value): """Used in concert with _back_transforms_mapping to convert values into the appropriate Python objects when retrieving them from the list as well as when storing them.""" if not isinstance(value, (str, bytes, None.__class__)): return 0 elif isinstance(value, str): return 1 elif isinstance(value, bytes): return 2 else: return 3 # NoneType def __init__(self, sequence=None, *, name=None): if name is None or sequence is not None: sequence = sequence or () _formats = [ self._types_mapping[type(item)] if not isinstance(item, (str, bytes)) else self._types_mapping[type(item)] % ( self._alignment * (len(item) // self._alignment + 1), ) for item in sequence ] self._list_len = len(_formats) assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len offset = 0 # The offsets of each list element into the shared memory's # data area (0 meaning the start of the data area, not the start # of the shared memory area). self._allocated_offsets = [0] for fmt in _formats: offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) self._allocated_offsets.append(offset) _recreation_codes = [ self._extract_recreation_code(item) for item in sequence ] requested_size = struct.calcsize( "q" + self._format_size_metainfo + "".join(_formats) + self._format_packing_metainfo + self._format_back_transform_codes ) self.shm = SharedMemory(name, create=True, size=requested_size) else: self.shm = SharedMemory(name) if sequence is not None: _enc = _encoding struct.pack_into( "q" + self._format_size_metainfo, self.shm.buf, 0, self._list_len, *(self._allocated_offsets) ) struct.pack_into( "".join(_formats), self.shm.buf, self._offset_data_start, *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) ) struct.pack_into( self._format_packing_metainfo, self.shm.buf, self._offset_packing_formats, *(v.encode(_enc) for v in _formats) ) struct.pack_into( self._format_back_transform_codes, self.shm.buf, self._offset_back_transform_codes, *(_recreation_codes) ) else: self._list_len = len(self) # Obtains size from offset 0 in buffer. self._allocated_offsets = list( struct.unpack_from( self._format_size_metainfo, self.shm.buf, 1 * 8 ) ) def _get_packing_format(self, position): "Gets the packing format for a single value stored in the list." 
position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") v = struct.unpack_from( "8s", self.shm.buf, self._offset_packing_formats + position * 8 )[0] fmt = v.rstrip(b'\x00') fmt_as_str = fmt.decode(_encoding) return fmt_as_str def _get_back_transform(self, position): "Gets the back transformation function for a single value." if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") transform_code = struct.unpack_from( "b", self.shm.buf, self._offset_back_transform_codes + position )[0] transform_function = self._back_transforms_mapping[transform_code] return transform_function def _set_packing_format_and_transform(self, position, fmt_as_str, value): """Sets the packing format and back transformation code for a single value in the list at the specified position.""" if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") struct.pack_into( "8s", self.shm.buf, self._offset_packing_formats + position * 8, fmt_as_str.encode(_encoding) ) transform_code = self._extract_recreation_code(value) struct.pack_into( "b", self.shm.buf, self._offset_back_transform_codes + position, transform_code ) def __getitem__(self, position): position = position if position >= 0 else position + self._list_len try: offset = self._offset_data_start + self._allocated_offsets[position] (v,) = struct.unpack_from( self._get_packing_format(position), self.shm.buf, offset ) except IndexError: raise IndexError("index out of range") back_transform = self._get_back_transform(position) v = back_transform(v) return v def __setitem__(self, position, value): position = position if position >= 0 else position + self._list_len try: item_offset = self._allocated_offsets[position] offset = self._offset_data_start + item_offset current_format = self._get_packing_format(position) except IndexError: raise IndexError("assignment index out of range") if not isinstance(value, (str, bytes)): new_format = self._types_mapping[type(value)] encoded_value = value else: allocated_length = self._allocated_offsets[position + 1] - item_offset encoded_value = (value.encode(_encoding) if isinstance(value, str) else value) if len(encoded_value) > allocated_length: raise ValueError("bytes/str item exceeds available storage") if current_format[-1] == "s": new_format = current_format else: new_format = self._types_mapping[str] % ( allocated_length, ) self._set_packing_format_and_transform( position, new_format, value ) struct.pack_into(new_format, self.shm.buf, offset, encoded_value) def __reduce__(self): return partial(self.__class__, name=self.shm.name), () def __len__(self): return struct.unpack_from("q", self.shm.buf, 0)[0] def __repr__(self): return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' @property def format(self): "The struct packing format used by all currently stored items." return "".join( self._get_packing_format(i) for i in range(self._list_len) ) @property def _format_size_metainfo(self): "The struct packing format used for the items' storage offsets." return "q" * (self._list_len + 1) @property def _format_packing_metainfo(self): "The struct packing format used for the items' packing formats." return "8s" * self._list_len @property def _format_back_transform_codes(self): "The struct packing format used for the items' back transforms." 
return "b" * self._list_len @property def _offset_data_start(self): # - 8 bytes for the list length # - (N + 1) * 8 bytes for the element offsets return (self._list_len + 2) * 8 @property def _offset_packing_formats(self): return self._offset_data_start + self._allocated_offsets[-1] @property def _offset_back_transform_codes(self): return self._offset_packing_formats + self._list_len * 8 def count(self, value): "L.count(value) -> integer -- return number of occurrences of value." return sum(value == entry for entry in self) def index(self, value): """L.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present.""" for position, entry in enumerate(self): if value == entry: return position else: raise ValueError(f"{value!r} not in this container") __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/sharedctypes.py000066400000000000000000000142421455552142400264270ustar00rootroot00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import ctypes import weakref from . import heap from . import get_context from .context import reduction, assert_spawning _ForkingPickler = reduction.ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=True, ctx=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): ''' Return a synchronization wrapper for a RawArray ''' obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def 
synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = {name: make_property(name) for name in names} classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, ctx) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length _ForkingPickler.register(type_, reduce_ctype) buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/spawn.py000066400000000000000000000221151455552142400250570ustar00rootroot00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import sys import runpy import types from . import get_start_method, set_start_method from . import process from .context import reduction from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
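# ---------------------------------------------------------------------------
# Illustrative aside (editorial addition): how an embedding application would
# override _python_exe through the public set_executable() hook before using
# the spawn start method; the interpreter path shown is only a placeholder.
#
#     import multiprocess as mp
#     mp.set_executable('/opt/myapp/bin/python3')   # hypothetical path
#     mp.set_start_method('spawn')
#     # child processes are now launched with that interpreter
# ---------------------------------------------------------------------------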
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = getattr(sys, 'frozen', False) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe def get_executable(): return _python_exe # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--multiprocessing-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv), "Not forking" if sys.platform == 'win32': import msvcrt import _winapi if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid) else: source_process = None new_handle = reduction.duplicate(pipe_handle, source_process=source_process) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process else: from . import resource_tracker resource_tracker._resource_tracker._fd = tracker_fd fd = pipe_handle parent_sentinel = os.dup(pipe_handle) exitcode = _main(fd, parent_sentinel) sys.exit(exitcode) def _main(fd, parent_sentinel): with os.fdopen(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = reduction.pickle.load(from_parent) prepare(preparation_data) self = reduction.pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap(parent_sentinel) def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... 
The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable.''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path=sys.path.copy() try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] main_mod_name = getattr(main_module.__spec__, "name", None) if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. 
We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/synchronize.py000066400000000000000000000270651455552142400263130ustar00rootroot00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import sys import tempfile try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import time from . import context from . import process from . import util # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
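# ---------------------------------------------------------------------------
# Illustrative aside (editorial addition), relating to the spawn-mode main
# module fixups above (_check_not_importing_main / _fixup_main_from_*): a
# script meant to work with the spawn start method keeps process creation
# behind the __main__ guard, so re-importing the main module in the child
# does not recurse.
#
#     import multiprocess as mp
#
#     def work(n):
#         return n * n
#
#     if __name__ == '__main__':
#         mp.freeze_support()          # no-op unless frozen into an executable
#         with mp.Pool(2) as pool:
#             print(pool.map(work, range(4)))
# ---------------------------------------------------------------------------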
# See issue 3770 try: from _multiprocess import SemLock, sem_unlink except ImportError: try: from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, *, ctx): if ctx is None: ctx = context._default_context.get_context() name = ctx.get_start_method() unlink_now = sys.platform == 'win32' or name == 'fork' for i in range(100): try: sl = self._semlock = _multiprocessing.SemLock( kind, value, maxvalue, self._make_name(), unlink_now) except FileExistsError: pass else: break else: raise FileExistsError('cannot find name for semaphore') util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if self._semlock.name is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .resource_tracker import register register(self._semlock.name, "semaphore") util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .resource_tracker import unregister sem_unlink(name) unregister(name, "semaphore") def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: h = sl.handle return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % \ (self.__class__.__name__, value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 
'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) # # Recursive lock # class RLock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None, *, ctx): self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def notify(self, n=1): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire( False), ('notify: Should not have been able to acquire ' + '_wait_semaphore') # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res, ('notify: Bug in sleeping_count.acquire' + '- res should not be False') sleepers = 0 while sleepers < n and self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def notify_all(self): self.notify(n=sys.maxsize) def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() 
return result # # Event # class Event(object): def __init__(self, *, ctx): self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False # # Barrier # class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, *, ctx): import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): self._array[1] = value uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/tests/000077500000000000000000000000001455552142400245165ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/tests/__init__.py000066400000000000000000006056601455552142400266440ustar00rootroot00000000000000# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import textwrap import time import io import itertools import sys import os import gc import errno import signal import array import socket import random import logging import subprocess import struct import operator import pickle #XXX: use dill? import weakref import warnings import test.support import test.support.script_helper from test import support from test.support import hashlib_helper from test.support import import_helper from test.support import os_helper from test.support import socket_helper from test.support import threading_helper from test.support import warnings_helper # Skip tests if _multiprocessing wasn't built. _multiprocessing = import_helper.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. import_helper.import_module('multiprocess.synchronize') import threading import multiprocess as multiprocessing import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocess import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None if hasattr(support,'check_sanitizer') and support.check_sanitizer(address=True): # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to # work around a libasan race condition: dead lock in pthread_create(). 
raise unittest.SkipTest("libasan has a pthread_create() dead lock") # Don't ignore user's installed packages ENV = dict(__cleanenv = False, __isolated = False) # Timeout to wait until a process completes #XXX: travis-ci TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused threading_helper.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocess import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocess.connection import wait def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." % nsems_min) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.monotonic() try: return self.func(*args, **kwds) finally: self.elapsed = time.monotonic() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. 
def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocess.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def _test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocess.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=support.SHORT_TIMEOUT) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. 
# We use alarm() to interrupt it if it blocks for too long. def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) @unittest.skipIf(True, 'bad pipe in pypy3') def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() travis = os.environ.get('COVERAGE') #XXX: travis-ci N = (1 if travis else 5) if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: p.start() for p in procs: 
join_process(p) for p in procs: self.assertEqual(p.exitcode, 0) procs = [self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.001) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() for i in range(3): gc.collect() self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(os_helper.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) @classmethod def 
_sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, encoding="utf-8") as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("__init__.py", err) #self.assertIn("1/0 # MARKER", err) #FIXME @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) 
self.assertEqual(p.exitcode, 1) with open(testfn, encoding="utf-8") as f: content = f.read() self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) cases = [ ((True,), 1), ((False,), 0), ((8,), 8), ((None,), 0), ((), 0), ] for args, expected in cases: with self.subTest(args=args): p = self.Process(target=sys.exit, args=args) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, expected) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) 
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with os_helper.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w', encoding="utf-8") as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with import_helper.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = time.monotonic() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = time.monotonic() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once # failed because the delta was only 135.8 ms. self.assertGreaterEqual(delta, 0.100) close_queue(q) def test_queue_feeder_donot_stop_onexc(self): # bpo-30414: verify feeder handles exceptions correctly if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): def __reduce__(self): raise AttributeError with test.support.captured_stderr(): q = self.Queue() q.put(NotSerializable()) q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) close_queue(q) with test.support.captured_stderr(): # bpo-33078: verify that the queue size is correctly handled # on errors. 
q = self.Queue(maxsize=1) q.put(NotSerializable()) q.put(True) try: self.assertEqual(q.qsize(), 1) except NotImplementedError: # qsize is not available on all platform as it # relies on sem_getvalue pass self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Check that the size of the queue is correct self.assertTrue(q.empty()) close_queue(q) def test_queue_feeder_on_queue_feeder_error(self): # bpo-30006: verify feeder handles exceptions using the # _on_queue_feeder_error hook. if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): """Mock unserializable object""" def __init__(self): self.reduce_was_called = False self.on_queue_feeder_error_was_called = False def __reduce__(self): self.reduce_was_called = True raise AttributeError class SafeQueue(multiprocessing.queues.Queue): """Queue with overloaded _on_queue_feeder_error hook""" @staticmethod def _on_queue_feeder_error(e, obj): if (isinstance(e, AttributeError) and isinstance(obj, NotSerializable)): obj.on_queue_feeder_error_was_called = True not_serializable_obj = NotSerializable() # The captured_stderr reduces the noise in the test report with test.support.captured_stderr(): q = SafeQueue(ctx=multiprocessing.get_context()) q.put(not_serializable_obj) # Verify that q is still functioning correctly q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Assert that the serialization and the hook have been called correctly self.assertTrue(not_serializable_obj.reduce_was_called) self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) def test_closed_queue_put_get_exceptions(self): for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): q.close() with self.assertRaisesRegex(ValueError, 'is closed'): q.put('foo') with self.assertRaisesRegex(ValueError, 'is closed'): q.get() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # 
self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def assertReachesEventually(self, func, value): for i in range(10): try: if func() == value: break except NotImplementedError: break time.sleep(DELTA) time.sleep(DELTA) self.assertReturnsIfImplemented(value, func) def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() 
cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.1 dt = time.monotonic() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = time.monotonic() - dt # borrow logic in assertTimeout() from test/lock_tests.py if not result and expected * 0.6 < dt < expected * 10.0: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
for i in range(3): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertTrue(success.value) @classmethod def _test_wait_result(cls, c, pid): with c: c.notify() time.sleep(1) if pid is not None: os.kill(pid, signal.SIGINT) def test_wait_result(self): if isinstance(self, ProcessesMixin) and sys.platform != 'win32': pid = os.getpid() else: pid = None c = self.Condition() with c: self.assertFalse(c.wait(0)) self.assertFalse(c.wait(0.1)) p = self.Process(target=self._test_wait_result, args=(c, pid)) p.start() self.assertTrue(c.wait(60)) if pid is not None: self.assertRaises(KeyboardInterrupt, c.wait, 60) p.join() class _TestEvent(BaseTestCase): @classmethod def _test_event(cls, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporarily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) p = self.Process(target=self._test_event, args=(event,)) p.daemon = True p.start() self.assertEqual(wait(), True) p.join() # # Tests for Barrier - adapted from tests in test/lock_tests.py # # Many of the tests for threading.Barrier use a list as an atomic # counter: a value is appended to increment the counter, and the # length of the list gives the value. We use the class DummyList # for the same purpose. class _DummyList(object): def __init__(self): wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) lock = multiprocessing.Lock() self.__setstate__((wrapper, lock)) self._lengthbuf[0] = 0 def __setstate__(self, state): (self._wrapper, self._lock) = state self._lengthbuf = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._wrapper, self._lock) def append(self, _): with self._lock: self._lengthbuf[0] += 1 def __len__(self): with self._lock: return self._lengthbuf[0] def _wait(): # A crude wait/yield function not relying on synchronization primitives. time.sleep(0.01) class Bunch(object): """ A bunch of threads. """ def __init__(self, namespace, f, args, n, wait_before_exit=False): """ Construct a bunch of `n` threads running the same function `f`. If `wait_before_exit` is True, the threads won't terminate until do_finish() is called. 
""" self.f = f self.args = args self.n = n self.started = namespace.DummyList() self.finished = namespace.DummyList() self._can_exit = namespace.Event() if not wait_before_exit: self._can_exit.set() threads = [] for i in range(n): p = namespace.Process(target=self.task) p.daemon = True p.start() threads.append(p) def finalize(threads): for p in threads: p.join() self._finalizer = weakref.finalize(self, finalize, threads) def task(self): pid = os.getpid() self.started.append(pid) try: self.f(*self.args) finally: self.finished.append(pid) self._can_exit.wait(30) assert self._can_exit.is_set() def wait_for_started(self): while len(self.started) < self.n: _wait() def wait_for_finished(self): while len(self.finished) < self.n: _wait() def do_finish(self): self._can_exit.set() def close(self): self._finalizer() class AppendTrue(object): def __init__(self, obj): self.obj = obj def __call__(self): self.obj.append(True) class _TestBarrier(BaseTestCase): """ Tests for Barrier objects. """ N = 5 defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout def setUp(self): self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) def tearDown(self): self.barrier.abort() self.barrier = None def DummyList(self): if self.TYPE == 'threads': return [] elif self.TYPE == 'manager': return self.manager.list() else: return _DummyList() def run_threads(self, f, args): b = Bunch(self, f, args, self.N-1) try: f(*args) b.wait_for_finished() finally: b.close() @classmethod def multipass(cls, barrier, results, n): m = barrier.parties assert m == cls.N for i in range(n): results[0].append(True) assert len(results[1]) == i * m barrier.wait() results[1].append(True) assert len(results[0]) == (i + 1) * m barrier.wait() try: assert barrier.n_waiting == 0 except NotImplementedError: pass assert not barrier.broken def test_barrier(self, passes=1): """ Test that a barrier is passed in lockstep """ results = [self.DummyList(), self.DummyList()] self.run_threads(self.multipass, (self.barrier, results, passes)) def test_barrier_10(self): """ Test that a barrier works for 10 consecutive runs """ return self.test_barrier(10) @classmethod def _test_wait_return_f(cls, barrier, queue): res = barrier.wait() queue.put(res) def test_wait_return(self): """ test the return value from barrier.wait """ queue = self.Queue() self.run_threads(self._test_wait_return_f, (self.barrier, queue)) results = [queue.get() for i in range(self.N)] self.assertEqual(results.count(0), 1) close_queue(queue) @classmethod def _test_action_f(cls, barrier, results): barrier.wait() if len(results) != 1: raise RuntimeError def test_action(self): """ Test the 'action' callback """ results = self.DummyList() barrier = self.Barrier(self.N, action=AppendTrue(results)) self.run_threads(self._test_action_f, (barrier, results)) self.assertEqual(len(results), 1) @classmethod def _test_abort_f(cls, barrier, results1, results2): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() def test_abort(self): """ Test that an abort will put the barrier in a broken state """ results1 = self.DummyList() results2 = self.DummyList() self.run_threads(self._test_abort_f, (self.barrier, results1, results2)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertTrue(self.barrier.broken) @classmethod def _test_reset_f(cls, barrier, results1, results2, results3): i = barrier.wait() if i == 
cls.N//2: # Wait until the other threads are all in the barrier. while barrier.n_waiting < cls.N-1: time.sleep(0.001) barrier.reset() else: try: barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) # Now, pass the barrier again barrier.wait() results3.append(True) def test_reset(self): """ Test that a 'reset' on a barrier frees the waiting threads """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() self.run_threads(self._test_reset_f, (self.barrier, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_abort_and_reset_f(cls, barrier, barrier2, results1, results2, results3): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() # Synchronize and reset the barrier. Must synchronize first so # that everyone has left it when we reset, and after so that no # one enters it before the reset. if barrier2.wait() == cls.N//2: barrier.reset() barrier2.wait() barrier.wait() results3.append(True) def test_abort_and_reset(self): """ Test that a barrier can be reset after being broken. """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() barrier2 = self.Barrier(self.N) self.run_threads(self._test_abort_and_reset_f, (self.barrier, barrier2, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_timeout_f(cls, barrier, results): i = barrier.wait() if i == cls.N//2: # One thread is late! time.sleep(1.0) try: barrier.wait(0.5) except threading.BrokenBarrierError: results.append(True) @unittest.skipIf(True, 'bad timeout in pypy3') def test_timeout(self): """ Test wait(timeout) """ results = self.DummyList() self.run_threads(self._test_timeout_f, (self.barrier, results)) self.assertEqual(len(results), self.barrier.parties) @classmethod def _test_default_timeout_f(cls, barrier, results): i = barrier.wait(cls.defaultTimeout) if i == cls.N//2: # One thread is later than the default timeout time.sleep(1.0) try: barrier.wait() except threading.BrokenBarrierError: results.append(True) @unittest.skipIf(True, 'bad timeout in pypy3') def test_default_timeout(self): """ Test the barrier's default timeout """ barrier = self.Barrier(self.N, timeout=0.5) results = self.DummyList() self.run_threads(self._test_default_timeout_f, (barrier, results)) self.assertEqual(len(results), barrier.parties) def test_single_thread(self): b = self.Barrier(1) b.wait() b.wait() @classmethod def _test_thousand_f(cls, barrier, passes, conn, lock): for i in range(passes): barrier.wait() with lock: conn.send(i) def test_thousand(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) passes = 1000 lock = self.Lock() conn, child_conn = self.Pipe(False) for j in range(self.N): p = self.Process(target=self._test_thousand_f, args=(self.barrier, passes, child_conn, lock)) p.start() self.addCleanup(p.join) for i in range(passes): for j in range(self.N): self.assertEqual(conn.recv(), i) # # # class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('q', 2 ** 33, 2 ** 34), ('c', latin('x'), latin('y')) ] def setUp(self): if not HAS_SHAREDCTYPES: 
self.skipTest("requires multiprocess.sharedctypes") @classmethod def _test(cls, values): for sv, cv in zip(values, cls.codes_values): sv.value = cv[2] def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.daemon = True proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) def test_rawvalue(self): self.test_value(raw=True) def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() obj1 = val1.get_obj() val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() obj2 = val2.get_obj() lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() obj3 = val3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def f(cls, seq): for i in range(1, len(seq)): seq[i] += seq[i-1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.daemon = True p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_array_from_size(self): size = 10 # Test for zeroing (see issue #11675). # The repetition below strengthens the test by increasing the chances # of previously allocated non-zero memory being used for the new array # on the 2nd and 3rd loops. 
for _ in range(3): arr = self.Array('i', size) self.assertEqual(len(arr), size) self.assertEqual(list(arr), [0] * size) arr[:] = range(10) self.assertEqual(list(arr), list(range(10))) del arr @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', list(range(10))) lock1 = arr1.get_lock() obj1 = arr1.get_obj() arr2 = self.Array('i', list(range(10)), lock=None) lock2 = arr2.get_lock() obj2 = arr2.get_obj() lock = self.Lock() arr3 = self.Array('i', list(range(10)), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) # # # class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(list(range(10))) self.assertEqual(a[:], list(range(10))) b = self.list() self.assertEqual(b[:], []) b.extend(list(range(5))) self.assertEqual(b[:], list(range(5))) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2,3,4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], list(range(10))) d = [a, b] e = self.list(d) self.assertEqual( [element[:] for element in e], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) def test_list_iter(self): a = self.list(list(range(10))) it = iter(a) self.assertEqual(list(it), list(range(10))) self.assertEqual(list(it), []) # exhausted # list modified during iteration it = iter(a) a[0] = 100 self.assertEqual(next(it), 100) def test_list_proxy_in_list(self): a = self.list([self.list(range(3)) for _i in range(3)]) self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) a[0][-1] = 55 self.assertEqual(a[0][:], [0, 1, 55]) for i in range(1, 3): self.assertEqual(a[i][:], [0, 1, 2]) self.assertEqual(a[1].pop(), 2) self.assertEqual(len(a[1]), 2) for i in range(0, 3, 2): self.assertEqual(len(a[i]), 3) del a b = self.list() b.append(b) del b def test_dict(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) def test_dict_iter(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) it = iter(d) self.assertEqual(list(it), indices) self.assertEqual(list(it), []) # exhausted # dictionary changed size during iteration it = iter(d) d.clear() self.assertRaises(RuntimeError, next, it) def test_dict_proxy_nested(self): pets = self.dict(ferrets=2, hamsters=4) supplies = self.dict(water=10, feed=3) d = self.dict(pets=pets, supplies=supplies) self.assertEqual(supplies['water'], 10) self.assertEqual(d['supplies']['water'], 10) d['supplies']['blankets'] = 5 self.assertEqual(supplies['blankets'], 5) self.assertEqual(d['supplies']['blankets'], 5) d['supplies']['water'] = 7 self.assertEqual(supplies['water'], 7) 
self.assertEqual(d['supplies']['water'], 7) del pets del supplies self.assertEqual(d['pets']['ferrets'], 2) d['supplies']['blankets'] = 11 self.assertEqual(d['supplies']['blankets'], 11) pets = d['pets'] supplies = d['supplies'] supplies['water'] = 7 self.assertEqual(supplies['water'], 7) self.assertEqual(d['supplies']['water'], 7) d.clear() self.assertEqual(len(d), 0) self.assertEqual(supplies['water'], 7) self.assertEqual(pets['hamsters'], 4) l = self.list([pets, supplies]) l[0]['marmots'] = 1 self.assertEqual(pets['marmots'], 1) self.assertEqual(l[0]['marmots'], 1) del pets del supplies self.assertEqual(l[0]['marmots'], 1) outer = self.list([[88, 99], l]) self.assertIsInstance(outer[0], list) # Not a ListProxy self.assertEqual(outer[-1][-1]['feed'], 3) def test_nested_queue(self): a = self.list() # Test queue inside list a.append(self.Queue()) a[0].put(123) self.assertEqual(a[0].get(), 123) b = self.dict() # Test queue inside dict b[0] = self.Queue() b[0].put(456) self.assertEqual(b[0].get(), 456) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) # # # def sqr(x, wait=0.0): time.sleep(wait) return x*x def mul(x, y): return x*y def raise_large_valuerror(wait): time.sleep(wait) raise ValueError("x" * 1024**2) def identity(x): return x class CountedObject(object): n_instances = 0 def __new__(cls): cls.n_instances += 1 return object.__new__(cls) def __del__(self): type(self).n_instances -= 1 class SayWhenError(ValueError): pass def exception_throwing_generator(total, when): if when == -1: raise SayWhenError("Somebody said when") for i in range(total): if i == when: raise SayWhenError("Somebody said when") yield i class _TestPool(BaseTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.pool = cls.Pool(4) @classmethod def tearDownClass(cls): cls.pool.terminate() cls.pool.join() cls.pool = None super().tearDownClass() def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), list(map(sqr, list(range(100))))) def test_starmap(self): psmap = self.pool.starmap tuples = list(zip(range(10), range(9,-1, -1))) self.assertEqual(psmap(mul, tuples), list(itertools.starmap(mul, tuples))) tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(psmap(mul, tuples, chunksize=20), list(itertools.starmap(mul, tuples))) def test_starmap_async(self): tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(self.pool.starmap_async(mul, tuples).get(), list(itertools.starmap(mul, tuples))) def test_map_async(self): self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), list(map(sqr, list(range(10))))) def test_map_async_callbacks(self): call_args = self.manager.list() if self.TYPE == 'manager' else [] self.pool.map_async(int, ['1'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(1, len(call_args)) self.assertEqual([1], call_args[0]) self.pool.map_async(int, ['a'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(2, len(call_args)) self.assertIsInstance(call_args[1], ValueError) def 
test_map_unplicklable(self): # Issue #19425 -- failure to pickle should not cause a hang if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class A(object): def __reduce__(self): raise RuntimeError('cannot pickle') with self.assertRaises(RuntimeError): self.pool.map(sqr, [A()]*10) def test_map_chunksize(self): try: self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) except multiprocessing.TimeoutError: self.fail("pool.map_async with chunksize stalled on null list") def test_map_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) # again, make sure it's reentrant with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(10, 3), 1) class SpecialIterable: def __iter__(self): return self def __next__(self): raise SayWhenError def __len__(self): return 1 with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) get = TimingWrapper(res.get) self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, list(range(10))) self.assertEqual(list(it), list(map(sqr, list(range(10))))) it = self.pool.imap(sqr, list(range(10))) for i in range(10): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) it = self.pool.imap(sqr, list(range(1000)), chunksize=100) for i in range(1000): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) def test_imap_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) for i in range(3): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) # SayWhenError seen at start of problematic chunk's results it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) for i in range(6): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) for i in range(4): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, list(range(10))) self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) def test_imap_unordered_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for 
{}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap_unordered(sqr, exception_throwing_generator(10, 3), 1) expected_values = list(map(sqr, list(range(10)))) with self.assertRaises(SayWhenError): # imap_unordered makes it difficult to anticipate the SayWhenError for i in range(10): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) it = self.pool.imap_unordered(sqr, exception_throwing_generator(20, 7), 2) expected_values = list(map(sqr, list(range(20)))) with self.assertRaises(SayWhenError): for i in range(20): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) def test_make_pool(self): expected_error = (RemoteError if self.TYPE == 'manager' else ValueError) self.assertRaises(expected_error, self.Pool, -1) self.assertRaises(expected_error, self.Pool, 0) if self.TYPE != 'manager': p = self.Pool(3) try: self.assertEqual(3, len(p._pool)) finally: p.close() p.join() def test_terminate(self): result = self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() # Sanity check the pool didn't wait for all tasks to finish self.assertLess(join.elapsed, 2.0) def test_empty_iterable(self): # See Issue 12157 p = self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) p.close() p.join() def test_context(self): if self.TYPE == 'processes': L = list(range(10)) expected = [sqr(i) for i in L] with self.Pool(2) as p: r = p.map_async(sqr, L) self.assertEqual(r.get(), expected) p.join() self.assertRaises(ValueError, p.map_async, sqr, L) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_traceback(self): # We want ensure that the traceback from the child process is # contained in the traceback raised in the main process. 
if self.TYPE == 'processes': with self.Pool(1) as p: try: p.apply(self._test_traceback) except Exception as e: exc = e else: self.fail('expected RuntimeError') p.join() self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with test.support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) # _helper_reraises_exception should not make the error # a remote exception with self.Pool(1) as p: try: p.map(sqr, exception_throwing_generator(1, -1), 1) except Exception as e: exc = e else: self.fail('expected SayWhenError') self.assertIs(type(exc), SayWhenError) self.assertIs(exc.__cause__, None) p.join() @classmethod def _test_wrapped_exception(cls): raise RuntimeError('foo') @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_wrapped_exception(self): # Issue #20980: Should not wrap exception when using thread pool with self.Pool(1) as p: with self.assertRaises(RuntimeError): p.apply(self._test_wrapped_exception) p.join() def test_map_no_failfast(self): # Issue #23992: the fail-fast behaviour when an exception is raised # during map() would make Pool.join() deadlock, because a worker # process would fill the result queue (after the result handler thread # terminated, hence not draining it anymore). t_start = time.monotonic() with self.assertRaises(ValueError): with self.Pool(2) as p: try: p.map(raise_large_valuerror, [0, 1]) finally: time.sleep(0.5) p.close() p.join() # check that we indeed waited for all jobs self.assertGreater(time.monotonic() - t_start, 0.9) def test_release_task_refs(self): # Issue #29861: task arguments and results should not be kept # alive after we are done with them. objs = [CountedObject() for i in range(10)] refs = [weakref.ref(o) for o in objs] self.pool.map(identity, objs) del objs for i in range(3): gc.collect() time.sleep(DELTA) # let threaded cleanup code run self.assertEqual(set(wr() for wr in refs), {None}) # With a process pool, copies of the objects are returned, check # they were released too. 
self.assertEqual(CountedObject.n_instances, 0) def test_enter(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) with pool: pass # call pool.terminate() # pool is no longer running with self.assertRaises(ValueError): # bpo-35477: pool.__enter__() fails if the pool is not running with pool: pass pool.join() def test_resource_warning(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) pool.terminate() pool.join() # force state to RUN to emit ResourceWarning in __del__() pool._state = multiprocessing.pool.RUN with warnings_helper.check_warnings( ('unclosed running multiprocessing pool', ResourceWarning)): pool = None support.gc_collect() def raising(): raise KeyError("key") def unpickleable_result(): return lambda: 42 class _TestPoolWorkerErrors(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_async_error_callback(self): p = multiprocessing.Pool(2) scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(raising, error_callback=errback) self.assertRaises(KeyError, res.get) self.assertTrue(scratchpad[0]) self.assertIsInstance(scratchpad[0], KeyError) p.close() p.join() def _test_unpickleable_result(self): from multiprocess.pool import MaybeEncodingError p = multiprocessing.Pool(2) # Make sure we don't lose pool processes because of encoding errors. for iteration in range(20): scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(unpickleable_result, error_callback=errback) self.assertRaises(MaybeEncodingError, res.get) wrapped = scratchpad[0] self.assertTrue(wrapped) self.assertIsInstance(scratchpad[0], MaybeEncodingError) self.assertIsNotNone(wrapped.exc) self.assertIsNotNone(wrapped.value) p.close() p.join() class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): sm = multiprocessing.get_start_method() if sm == 'fork' and sys.implementation.name == 'pypy': self.skipTest("race condition on PyPy") p = multiprocessing.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive # (countdown * DELTA = 5 seconds max startup process time) countdown = 50 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [w.pid for w in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() def test_pool_worker_lifetime_early_close(self): # Issue #10332: closing a pool whose workers have limited lifetimes # before all the tasks completed would make join() hang. 
p = multiprocessing.Pool(3, maxtasksperchild=1) results = [] for i in range(6): results.append(p.apply_async(sqr, (i, 0.3))) p.close() p.join() # check the results for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) def test_pool_maxtasksperchild_invalid(self): for value in [0, -1, 0.5, "12"]: with self.assertRaises(ValueError): multiprocessing.Pool(3, maxtasksperchild=value) def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): # tests cases against bpo-38744 and bpo-39360 cmd = '''if 1: from multiprocess import Pool problem = None class A: def __init__(self): self.pool = Pool(processes=1) def test(): global problem problem = A() problem.pool.map(float, tuple(range(10))) if __name__ == "__main__": test() ''' rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) self.assertEqual(rc, 0) # # Test of creating a customized manager class # from multiprocess.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in range(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('__next__',) def __iter__(self): return self def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() self.common(manager) manager.shutdown() # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context(self): with MyManager() as manager: self.common(manager) # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. 
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context_prestarted(self): manager = MyManager() manager.start() with manager: self.common(manager) self.assertEqual(manager._process.exitcode, 0) def common(self, manager): foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = pyqueue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) values = ['hello world', None, True, 2.25, 'hall\xe5 v\xe4rlden', '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', b'hall\xe5 v\xe4rlden', ] result = values[:] @classmethod def _putter(cls, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple queue.put(tuple(cls.values)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER ) manager.start() self.addCleanup(manager.shutdown) p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() self.assertEqual(queue.get(), self.result) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. # Changed on PyPy: passing functions to xmlrpc is broken #self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue @hashlib_helper.requires_hashdigest('md5') class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER) try: srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. 
srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() p.join() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue finally: if hasattr(manager, "shutdown"): manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) try: manager.start() self.addCleanup(manager.shutdown) except OSError as e: if e.errno != errno.EADDRINUSE: raise # Retry after some time, in case the old socket was lingering # (sporadic failure on buildbots) time.sleep(1.0) manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) if hasattr(manager, "shutdown"): self.addCleanup(manager.shutdown) # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', list(range(4))) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocessing.BufferTooShort as e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(-1), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) time.sleep(.1) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(OSError, reader.send, 2) self.assertRaises(OSError, writer.recv) self.assertRaises(OSError, writer.poll) def test_spawn_close(self): # We 
test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() self.addCleanup(os_helper.unlink, os_helper.TESTFN) with open(os_helper.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) reduction.send_handle(conn, fd, p.pid) p.join() with open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(os_helper.unlink, os_helper.TESTFN) with open(os_helper.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(os_helper.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") 
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. 
conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @hashlib_helper.requires_hashdigest('md5') class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=support.LONG_TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((socket_helper.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = 
lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def _test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() support.gc_collect() # for PyPy and other GCs self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) @test.support.cpython_only def test_free_from_gc(self): # Check that 
freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). # Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") @hashlib_helper.requires_hashdigest('md5') class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def _new_shm_name(self, prefix): # Add a PID to the name of a POSIX shared memory object to allow # running multiprocessing tests (test_multiprocessing_fork, # test_multiprocessing_spawn, etc) in parallel. return prefix + str(os.getpid()) def test_shared_memory_basics(self): name_tsmb = self._new_shm_name('test01_tsmb') sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, name_tsmb) self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Verify __repr__ self.assertIn(sms.name, str(sms)) self.assertIn(str(sms.size), str(sms)) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory(name_tsmb) self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. 
same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. same_sms.close() # Creating Shared Memory Segment with -ve size with self.assertRaises(ValueError): shared_memory.SharedMemory(create=True, size=-2) # Attaching Shared Memory Segment without a name with self.assertRaises(ValueError): shared_memory.SharedMemory(create=False) # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. name_dblunlink = self._new_shm_name('test01_dblunlink') sms_uno = shared_memory.SharedMemory( name_dblunlink, create=True, size=5000 ) with self.assertRaises(FileNotFoundError): try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory(name_dblunlink) sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( name_tsmb, create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. 
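# --- Illustrative sketch (not part of the test suite) ----------------------
# The assertions around here exercise multiprocess.shared_memory.SharedMemory.
# As a minimal, hedged sketch of the create/attach-by-name pattern under test
# (the helper names below are hypothetical, not part of this module):

def _demo_shm_child(name, payload):
    # Attach to an existing block by name and write into its buffer.
    from multiprocess import shared_memory
    block = shared_memory.SharedMemory(name)
    block.buf[:len(payload)] = payload
    block.close()

def _demo_shared_memory_roundtrip():
    from multiprocess import Process, shared_memory
    src = shared_memory.SharedMemory(create=True, size=16)
    try:
        p = Process(target=_demo_shm_child, args=(src.name, b'hello'))
        p.start()
        p.join()
        assert bytes(src.buf[:5]) == b'hello'
    finally:
        src.close()
        src.unlink()  # frees the POSIX segment; has no effect on Windows
# ----------------------------------------------------------------------------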
sms.close() @unittest.skipIf(True, "fails with dill >= 0.3.5") def test_shared_memory_recreate(self): # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocess.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) def test_invalid_shared_memory_creation(self): # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_pickle_unpickle(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) sms.buf[0:6] = b'pickle' # Test pickling pickled_sms = pickle.dumps(sms, protocol=proto) # Test unpickling sms2 = pickle.loads(pickled_sms) self.assertIsInstance(sms2, shared_memory.SharedMemory) self.assertEqual(sms.name, sms2.name) self.assertEqual(bytes(sms.buf[0:6]), b'pickle') self.assertEqual(bytes(sms2.buf[0:6]), b'pickle') # Test that unpickled version is still the same SharedMemory sms.buf[0:6] = b'newval' self.assertEqual(bytes(sms.buf[0:6]), b'newval') self.assertEqual(bytes(sms2.buf[0:6]), b'newval') sms2.buf[0:6] = b'oldval' self.assertEqual(bytes(sms.buf[0:6]), b'oldval') self.assertEqual(bytes(sms2.buf[0:6]), b'oldval') def test_shared_memory_pickle_unpickle_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sms = shared_memory.SharedMemory(create=True, size=512) sms.buf[0:6] = b'pickle' pickled_sms = pickle.dumps(sms, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sms.close() sms.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(pickled_sms) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocessing tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works.
p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' #XXX: ensure correct resource_tracker rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. 
absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify __repr__ self.assertIn(sl.shm.name, str(sl)) self.assertIn(str(list(sl)), str(sl)) # Index Out of Range (get) with self.assertRaises(IndexError): sl[7] # Index Out of Range (set) with self.assertRaises(IndexError): sl[7] = 2 # Assign value without format change (str -> str) current_format = sl._get_packing_format(0) sl[0] = 'howdy' self.assertEqual(current_format, sl._get_packing_format(0)) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. name_duplicate = self._new_shm_name('test03_duplicate') sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual(name_duplicate, sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. 
empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl, protocol=proto) deserialized_sl = pickle.loads(serialized_sl) self.assertIsInstance( deserialized_sl, shared_memory.ShareableList) self.assertEqual(deserialized_sl[-1], 9) self.assertIsNot(sl, deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") sl[3] = "newvalue" self.assertEqual(deserialized_sl[3], "newvalue") larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto) self.assertEqual(len(serialized_sl), len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_ShareableList_pickling_dead_object(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): sl = shared_memory.ShareableList(range(10)) serialized_sl = pickle.dumps(sl, protocol=proto) # Now, we are going to kill the original object. # So, unpickled one won't be able to attach to it. sl.shm.close() sl.shm.unlink() with self.assertRaises(FileNotFoundError): pickle.loads(serialized_sl) def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. p.terminate() p.wait() deadline = time.monotonic() + support.LONG_TIMEOUT t = 0.1 while time.monotonic() < deadline: time.sleep(t) t = min(t*2, 5) try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: raise AssertionError("A SharedMemory segment was leaked after" " a process was abruptly terminated.") if os.name == 'posix': # Without this line it was raising warnings like: # UserWarning: resource_tracker: # There appear to be 1 leaked shared_memory # objects to clean up at shutdown # See: https://bugs.python.org/issue45209 resource_tracker.unregister(f"/{name}", "shared_memory") # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # Test to verify that `Finalize` works. 
# class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): for i in range(3): gc.collect() self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a gc.collect() # For PyPy or other GCs. b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called gc.collect() # For PyPy or other GCs. c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) @test.support.cpython_only def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with threading_helper.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' 
+ m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): pass finally: # Hack private attribute _handle to avoid printing an error # in conn.__del__ conn._handle = None 
self.assertRaises((ValueError, OSError), multiprocessing.connection.Connection, -1) @hashlib_helper.requires_hashdigest('md5') class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection.CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') # # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 @hashlib_helper.requires_hashdigest('md5') class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() self.mgr.join() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() m.join() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocessing.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random()*0.1) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random()*0.1) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.create_server((socket_helper.HOST, 0)) addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) def test_wait_timeout(self): from multiprocess.connection import wait expected = 5 a, b = multiprocessing.Pipe() start = time.monotonic() res = wait([a, b], expected) delta = time.monotonic() - start self.assertEqual(res, []) self.assertLess(delta, expected * 2) self.assertGreater(delta, expected * 0.5) b.send(None) start = time.monotonic() res = wait([a, b], 20) delta = time.monotonic() - start self.assertEqual(res, [a]) self.assertLess(delta, 0.4) 
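    # Editorial note (illustrative, not part of the test suite):
    # multiprocess.connection.wait(object_list, timeout=None) returns the
    # subset of connections/sockets/sentinels that are ready; an empty list
    # means the timeout expired with nothing readable.  A standalone sketch:
    #
    #   from multiprocess import Pipe
    #   from multiprocess.connection import wait
    #   a, b = Pipe()
    #   b.send('ping')
    #   ready = wait([a], timeout=1)      # -> [a], data is already pending
    #   assert ready and ready[0].recv() == 'ping'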
@classmethod def signal_and_sleep(cls, sem, period): sem.release() time.sleep(period) def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocessing.Semaphore(0) a, b = multiprocessing.Pipe() p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) self.assertTrue(sem.acquire(timeout=20)) start = time.monotonic() res = wait([a, p.sentinel, b], expected + 20) delta = time.monotonic() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = time.monotonic() res = wait([a, p.sentinel, b], 20) delta = time.monotonic() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocessing.Pipe() t = time.monotonic() res = wait([a], timeout=-1) t = time.monotonic() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls): import json r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json # start child process using unusual flags prog = ('from multiprocess.tests import TestFlags; ' + 'TestFlags.run_in_child()') data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() 
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. @classmethod def child(cls, n, conn): if n > 1: p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() join_process(p) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocessing.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocessing.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() join_process(p) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocessing.Pipe() fd = self.get_high_socket_fd() try: p = multiprocessing.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() join_process(p) finally: self.close(fd) writer.close() reader.close() if multiprocessing.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) 
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, ctx.get_start_method()) def test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocessing.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def test_set_get(self): multiprocessing.set_forkserver_preload(PRELOAD) count = 0 old_method = multiprocessing.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocessing.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocessing.get_start_method(), method) ctx = multiprocessing.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocessing) count += 1 finally: multiprocessing.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocessing.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['spawn', 'fork'] or methods == ['fork', 'spawn', 'forkserver'] or methods == ['spawn', 'fork', 'forkserver']) def _test_preload_resources(self): if multiprocessing.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestResourceTracker(unittest.TestCase): def _test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os, tempfile import multiprocess as mp from multiprocess import resource_tracker from multiprocess.shared_memory import SharedMemory mp.set_start_method("spawn") rand = tempfile._RandomNameSequence() def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() 
return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() deadline = time.monotonic() + support.LONG_TIMEOUT while time.monotonic() < deadline: time.sleep(.5) try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, but OSX seems to give # EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break else: raise AssertionError( f"A {rtype} resource was leaked after a process was " f"abruptly terminated.") err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) def check_resource_tracker_death(self, signum, should_die): # bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) support.wait_process(pid, exitcode=-signal.SIGKILL) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. 
reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) def test_too_long_name_resource(self): # gh-96819: Resource names that will make the length of a write to a pipe # greater than PIPE_BUF are not allowed rtype = "shared_memory" too_long_name_resource = "a" * (512 - len(rtype)) with self.assertRaises(ValueError): resource_tracker.register(too_long_name_resource, rtype) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() def test_close(self): queue = multiprocessing.SimpleQueue() queue.close() # closing a queue twice should not fail queue.close() # Test specific to CPython since it tests private attributes @test.support.cpython_only def test_closed(self): queue = multiprocessing.SimpleQueue() queue.close() self.assertTrue(queue._reader.closed) self.assertTrue(queue._writer.closed) class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: During pool creation, if we can't create a process, # don't leak already created ones. will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) @hashlib_helper.requires_hashdigest('md5') class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. 
def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert o[1] == 6 """ manager_class = multiprocessing.managers.SyncManager def setUp(self): self.manager = self.manager_class() self.manager.start() self.proc = None def tearDown(self): if self.proc is not None and self.proc.is_alive(): self.proc.terminate() self.proc.join() self.manager.shutdown() self.manager = None self.proc = None @classmethod def setUpClass(cls): support.reap_children() tearDownClass = setUpClass def wait_proc_exit(self): # Only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395). join_process(self.proc) start_time = time.monotonic() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = time.monotonic() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) self.run_worker(self._test_pool, o) @classmethod def _test_queue(cls, obj): assert obj.qsize() == 2 assert obj.full() assert not obj.empty() assert obj.get() == 5 assert not obj.empty() assert obj.get() == 6 assert obj.empty() def test_queue(self, qname="Queue"): o = getattr(self.manager, qname)(2) o.put(5) o.put(6) self.run_worker(self._test_queue, o) assert o.empty() assert not o.full() def test_joinable_queue(self): self.test_queue("JoinableQueue") @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.count(5) == 1 assert obj.index(5) == 0 obj.sort() obj.reverse() for x in obj: pass assert len(obj) == 1 assert obj.pop(0) == 5 def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_dict(cls, obj): assert len(obj) == 1 assert obj['foo'] 
== 5 assert obj.get('foo') == 5 assert list(obj.items()) == [('foo', 5)] assert list(obj.keys()) == ['foo'] assert list(obj.values()) == [5] assert obj.copy() == {'foo': 5} assert obj.popitem() == ('foo', 5) def test_dict(self): o = self.manager.dict() o['foo'] = 5 self.run_worker(self._test_dict, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_value(cls, obj): assert obj.value == 1 assert obj.get() == 1 obj.set(2) def test_value(self): o = self.manager.Value('i', 1) self.run_worker(self._test_value, o) self.assertEqual(o.value, 2) self.assertEqual(o.get(), 2) @classmethod def _test_array(cls, obj): assert obj[0] == 0 assert obj[1] == 1 assert len(obj) == 2 assert list(obj) == [0, 1] def test_array(self): o = self.manager.Array('i', [0, 1]) self.run_worker(self._test_array, o) @classmethod def _test_namespace(cls, obj): assert obj.x == 0 assert obj.y == 1 def test_namespace(self): o = self.manager.Namespace() o.x = 0 o.y = 1 self.run_worker(self._test_namespace, o) class TestNamedResource(unittest.TestCase): @unittest.skipIf(sys.hexversion <= 0x30a05f0, "SemLock subclass") def test_global_named_resource_spawn(self): # # gh-90549: Check that global named resources in main module # will not leak by a subprocess, in spawn context. # testfn = os_helper.TESTFN self.addCleanup(os_helper.unlink, testfn) with open(testfn, 'w', encoding='utf-8') as f: f.write(textwrap.dedent('''\ import multiprocess as mp ctx = mp.get_context('spawn') global_resource = ctx.Semaphore() def submain(): pass if __name__ == '__main__': p = ctx.Process(target=submain) p.start() p.join() ''')) rc, out, err = test.support.script_helper.assert_python_ok(testfn, **ENV) # on error, err = 'UserWarning: resource_tracker: There appear to # be 1 leaked semaphore objects to clean up at shutdown' self.assertEqual(err, b'') class MiscTestCase(unittest.TestCase): def test__all__(self): # Just make sure names in not_exported are excluded support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, not_exported=['SUBDEBUG', 'SUBWARNING', 'license', 'citation']) # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocessing.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. 
test.support.gc_collect() processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) if processes: test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocessing.Process connection = multiprocessing.connection current_process = staticmethod(multiprocessing.current_process) parent_process = staticmethod(multiprocessing.parent_process) active_children = staticmethod(multiprocessing.active_children) Pool = staticmethod(multiprocessing.Pool) Pipe = staticmethod(multiprocessing.Pipe) Queue = staticmethod(multiprocessing.Queue) JoinableQueue = staticmethod(multiprocessing.JoinableQueue) Lock = staticmethod(multiprocessing.Lock) RLock = staticmethod(multiprocessing.RLock) Semaphore = staticmethod(multiprocessing.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) Condition = staticmethod(multiprocessing.Condition) Event = staticmethod(multiprocessing.Event) Barrier = staticmethod(multiprocessing.Barrier) Value = staticmethod(multiprocessing.Value) Array = staticmethod(multiprocessing.Array) RawValue = staticmethod(multiprocessing.RawValue) RawArray = staticmethod(multiprocessing.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocessing.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocessing.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) start_time = time.monotonic() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = time.monotonic() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. 
test.support.environment_altered = True support.print_warning('Shared objects which still exist ' 'at manager shutdown:') support.print_warning(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocessing.dummy.Process connection = multiprocessing.dummy.connection current_process = staticmethod(multiprocessing.dummy.current_process) active_children = staticmethod(multiprocessing.dummy.active_children) Pool = staticmethod(multiprocessing.dummy.Pool) Pipe = staticmethod(multiprocessing.dummy.Pipe) Queue = staticmethod(multiprocessing.dummy.Queue) JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) Lock = staticmethod(multiprocessing.dummy.Lock) RLock = staticmethod(multiprocessing.dummy.RLock) Semaphore = staticmethod(multiprocessing.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) Condition = staticmethod(multiprocessing.dummy.Condition) Event = staticmethod(multiprocessing.dummy.Event) Barrier = staticmethod(multiprocessing.dummy.Barrier) Value = staticmethod(multiprocessing.dummy.Value) Array = staticmethod(multiprocessing.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass if type_ == 'manager': Temp = hashlib_helper.requires_hashdigest('md5')(Temp) Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = __module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocessing.set_forkserver_preload(PRELOAD) multiprocessing.process._cleanup() dangling[0] = multiprocessing.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " "see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates temp directory multiprocessing.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = False # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. 
test.support.gc_collect() multiprocessing.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocessing.process._dangling) - set(dangling[0]) if processes: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocessing.util._cleanup_tests() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule @unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available') @unittest.skipIf(sys.platform != "linux", "Linux only") @unittest.skipIf(sys.hexversion <= 0x30a05f0, "SemLock subclass") class SemLockTests(unittest.TestCase): def test_semlock_subclass(self): class SemLock(_multiprocessing.SemLock): pass name = f'test_semlock_subclass-{os.getpid()}' s = SemLock(1, 0, 10, name, False) _multiprocessing.sem_unlink(name) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/tests/__main__.py000066400000000000000000000016201455552142400266070ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') tests = glob.glob(suite + os.path.sep + '__init__.py') + \ [i for i in tests if 'main' not in i] if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/tests/mp_fork_bomb.py000066400000000000000000000007001455552142400275210ustar00rootroot00000000000000import multiprocessing, sys def foo(): print("123") # Because "if __name__ == '__main__'" is missing this will not work # correctly on Windows. However, we should get a RuntimeError rather # than the Windows equivalent of a fork bomb. 
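# (Editorial note: under the "spawn" and "forkserver" start methods the child
# process re-imports this module; without the __main__ guard that re-import
# would try to start yet another child, which multiprocessing detects during
# bootstrapping and reports as a RuntimeError instead of forking endlessly.)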
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) else: multiprocessing.set_start_method('spawn') p = multiprocessing.Process(target=foo) p.start() p.join() sys.exit(p.exitcode) uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/tests/mp_preload.py000066400000000000000000000005551455552142400272170ustar00rootroot00000000000000import multiprocessing multiprocessing.Lock() def f(): print("ok") if __name__ == "__main__": ctx = multiprocessing.get_context("forkserver") modname = "multiprocess.tests.mp_preload" # Make sure it's importable __import__(modname) ctx.set_forkserver_preload([modname]) proc = ctx.Process(target=f) proc.start() proc.join() uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/tests/test_multiprocessing_fork.py000066400000000000000000000007341455552142400324030ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("fork is not available on Windows") if sys.platform == 'darwin': raise unittest.SkipTest("test may crash on macOS (bpo-33725)") install_tests_in_module_dict(globals(), 'fork') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/tests/test_multiprocessing_forkserver.py000066400000000000000000000006071455552142400336310ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("forkserver is not available on Windows") install_tests_in_module_dict(globals(), 'forkserver') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/tests/test_multiprocessing_main_handling.py000066400000000000000000000273621455552142400342400ustar00rootroot00000000000000# tests __main__ module handling in multiprocessing from test import support from test.support import import_helper # Skip tests if _multiprocessing wasn't built. import_helper.import_module('_multiprocessing') import importlib import importlib.machinery import unittest import sys import os import os.path import py_compile from test.support import os_helper from test.support.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, assert_python_ok) if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") # Look up which start methods are available to test import multiprocess as multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) # Issue #22332: Skip tests if sem_open implementation is broken. import_helper.import_module('multiprocess.synchronize') verbose = support.verbose test_source = """\ # multiprocessing includes all sorts of shenanigans to make __main__ # attributes accessible in the subprocess in a pickle compatible way. 
# We run the "doesn't work in the interactive interpreter" example from # the docs to make sure it *does* work from an executed __main__, # regardless of the invocation mechanism import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method # We use this __main__ defined function in the map call below in order to # check that multiprocessing in correctly running the unguarded # code in child processes and then making it available as __main__ def f(x): return x*x # Check explicit relative imports if "check_sibling" in __file__: # We're inside a package and not in a __main__.py file # so make sure explicit relative imports work correctly from . import sibling if __name__ == '__main__': start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(f, [1, 2, 3], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) test_source_main_skipped_in_children = """\ # __main__.py files have an implied "if __name__ == '__main__'" so # multiprocessing should always skip running them in child processes # This means we can't use __main__ defined functions in child processes, # so we just use "int" as a passthrough operation below if __name__ != "__main__": raise RuntimeError("Should only be called as __main__!") import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(int, [1, 4, 9], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) # These helpers were copied from test_cmd_line_script & tweaked a bit... def _make_test_script(script_dir, script_basename, source=test_source, omit_suffix=False): to_return = make_script(script_dir, script_basename, source, omit_suffix) # Hack to check explicit relative imports if script_basename == "check_sibling": make_script(script_dir, "sibling", "") importlib.invalidate_caches() return to_return def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source=test_source, depth=1): to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth) importlib.invalidate_caches() return to_return # There's no easy way to pass the script directory in to get # -m to work (avoiding that is the whole point of making # directories and zipfiles executable!) 
# So we fake it for testing purposes with a custom launch script launch_source = """\ import sys, os.path, runpy sys.path.insert(0, %s) runpy._run_module_as_main(%r) """ def _make_launch_script(script_dir, script_basename, module_name, path=None): if path is None: path = "os.path.dirname(__file__)" else: path = repr(path) source = launch_source % (path, module_name) to_return = make_script(script_dir, script_basename, source) importlib.invalidate_caches() return to_return class MultiProcessingCmdLineMixin(): maxDiff = None # Show full tracebacks on subprocess failure def setUp(self): if self.start_method not in AVAILABLE_START_METHODS: self.skipTest("%r start method not available" % self.start_method) def _check_output(self, script_name, exit_code, out, err): if verbose > 1: print("Output from test script %r:" % script_name) print(repr(out)) self.assertEqual(exit_code, 0) self.assertEqual(err.decode('utf-8'), '') expected_results = "%s -> [1, 4, 9]" % self.start_method self.assertEqual(out.decode('utf-8').strip(), expected_results) def _check_script(self, script_name, *cmd_line_switches): if not __debug__: cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) run_args = cmd_line_switches + (script_name, self.start_method) rc, out, err = assert_python_ok(*run_args, __isolated=False) self._check_output(script_name, rc, out, err) def test_basic_script(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') self._check_script(script_name) def test_basic_script_no_suffix(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script', omit_suffix=True) self._check_script(script_name) def test_ipython_workaround(self): # Some versions of the IPython launch script are missing the # __name__ = "__main__" guard, and multiprocessing has long had # a workaround for that case # See https://github.com/ipython/ipython/issues/4698 source = test_source_main_skipped_in_children with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'ipython', source=source) self._check_script(script_name) script_no_suffix = _make_test_script(script_dir, 'ipython', source=source, omit_suffix=True) self._check_script(script_no_suffix) def test_script_compiled(self): with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(pyc_file) def test_directory(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) self._check_script(script_dir) def test_directory_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) self._check_script(script_dir) def test_zipfile(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) self._check_script(zip_name) def test_zipfile_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) compiled_name = 
py_compile.compile(script_name, doraise=True) zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) self._check_script(zip_name) def test_module_in_package(self): with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, 'check_sibling') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.check_sibling') self._check_script(launch_name) def test_module_in_package_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) self._check_script(launch_name) def test_module_in_subpackage_in_zipfile(self): with os_helper.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) self._check_script(launch_name) def test_package(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) def test_package_compiled(self): source = self.main_in_children_source with os_helper.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) compiled_name = py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = import_helper.make_legacy_pyc(script_name) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) # Test all supported start methods (setupClass skips as appropriate) class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'spawn' main_in_children_source = test_source_main_skipped_in_children class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'fork' main_in_children_source = test_source class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'forkserver' main_in_children_source = test_source_main_skipped_in_children def tearDownModule(): support.reap_children() if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/tests/test_multiprocessing_spawn.py000066400000000000000000000004241455552142400325660ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") install_tests_in_module_dict(globals(), 'spawn') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.10/multiprocess/util.py000066400000000000000000000332741455552142400247140ustar00rootroot00000000000000# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import itertools import sys import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocess' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocess ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # Abstract socket support def _platform_supports_abstract_sockets(): if sys.platform == "linux": return True if hasattr(sys, 'getandroidapilevel'): return True return False def is_abstract_socket_namespace(address): if not address: return False if isinstance(address, bytes): return address[0] == 0 elif isinstance(address, str): return address[0] == "\0" raise TypeError(f'address type of {address!r} unrecognized') abstract_sockets_supported = _platform_supports_abstract_sockets() # # Function returning a temp directory which will be removed on exit # def _remove_temp_dir(rmtree, tempdir): rmtree(tempdir) current_process = process.current_process() # current_process() can be None if the finalizer is called # late during Python finalization if current_process is not None: current_process._config['tempdir'] = None def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up tempdir = process.current_process()._config.get('tempdir') if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) # keep a strong reference to shutil.rmtree(), since the finalizer # can be called late during Python shutdown Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), exitpriority=-100) process.current_process()._config['tempdir'] = tempdir return tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} 
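# _finalizer_registry maps (exitpriority, creation_index) keys to Finalize
# instances.  _run_finalizers() sorts these keys in reverse, so callbacks with
# a higher exitpriority run first and, within the same priority, the most
# recently registered finalizer runs first.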
_finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): if (exitpriority is not None) and not isinstance(exitpriority,int): raise TypeError( "Exitpriority ({0!r}) must be None or int, not {1!s}".format( exitpriority, type(exitpriority))) if obj is not None: self._weakref = weakref.ref(obj, self) elif exitpriority is None: raise ValueError("Without object, exitpriority cannot be None") self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<%s object, dead>' % self.__class__.__name__ x = '<%s object, callback=%s' % ( self.__class__.__name__, getattr(self._callback, '__name__', self._callback)) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitpriority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0] is not None else: f = lambda p : p[0] is not None and p[0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). # list(_finalizer_registry) should be atomic, while # list(_finalizer_registry.items()) is not. 
keys = [key for key in list(_finalizer_registry) if f(key)] keys.sort(reverse=True) for key in keys: finalizer = _finalizer_registry.get(key) # key may have been removed from the registry if finalizer is not None: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=process.active_children, current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) def _at_fork_reinit(self): self._lock._at_fork_reinit() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () # # Close fds except those specified # try: MAXFD = os.sysconf("SC_OPEN_MAX") except Exception: MAXFD = 256 def close_all_fds_except(fds): fds = list(fds) + [-1, MAXFD] fds.sort() assert fds[-1] == MAXFD, 'fd too large' for i in range(len(fds) - 1): os.closerange(fds[i]+1, fds[i+1]) # # Close sys.stdin and replace stdin with os.devnull # def _close_stdin(): if sys.stdin is None: return try: sys.stdin.close() except (OSError, ValueError): pass try: fd = os.open(os.devnull, os.O_RDONLY) try: sys.stdin = open(fd, encoding="utf-8", closefd=False) except: os.close(fd) raise except (OSError, ValueError): pass # # Flush standard streams, if any # def _flush_std_streams(): try: sys.stdout.flush() except (AttributeError, ValueError): pass try: sys.stderr.flush() except (AttributeError, ValueError): pass # # Start a program with only specified fds kept open # def spawnv_passfds(path, args, passfds): import _posixsubprocess passfds = tuple(sorted(map(int, passfds))) errpipe_read, errpipe_write = os.pipe() try: return 
_posixsubprocess.fork_exec( args, [os.fsencode(path)], True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, None, None, None, -1, None) finally: os.close(errpipe_read) os.close(errpipe_write) def close_fds(*fds): """Close each file descriptor given as an argument""" for fd in fds: os.close(fd) def _cleanup_tests(): """Cleanup multiprocessing resources when multiprocessing tests completed.""" from test import support # cleanup multiprocessing process._cleanup() # Stop the ForkServer process if it's running from multiprocess import forkserver forkserver._forkserver._stop() # Stop the ResourceTracker process if it's running from multiprocess import resource_tracker resource_tracker._resource_tracker._stop() # bpo-37421: Explicitly call _run_finalizers() to remove immediately # temporary directories created by multiprocessing.util.get_temp_dir(). _run_finalizers() support.gc_collect() support.reap_children() uqfoundation-multiprocess-b3457a5/pypy3.8/000077500000000000000000000000001455552142400205525ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/README_MODS000066400000000000000000000152371455552142400222640ustar00rootroot00000000000000cp -rf py3.8/examples . cp -rf py3.8/doc . cp -f py3.8/index.html . cp -rf pypy3.8-v7.3.7-src/pypy/module/_multiprocessing module/_multiprocess cp -rf pypy3.8-v7.3.7-src/lib-python/3/multiprocessing multiprocess cp -rf pypy3.8-v7.3.7-src/lib-python/3/test/test_multiprocessing*py multiprocess/tests cp -rf pypy3.8-v7.3.7-src/lib-python/3/test/_test_multiprocessing.py multiprocess/tests/__init__.py cp -rf pypy3.8-v7.3.7-src/lib-python/3/test/mp_*py multiprocess/tests cp -f py3.8/multiprocess/tests/__main__.py multiprocess/tests cp -rf py3.8/_multiprocess _multiprocess # ---------------------------------------------------------------------- EDIT multiprocess/__init__: __version__ EDIT multiprocess: multiprocessing --> multiprocess EDIT multiprocess: pickle --> dill ADDED *args, **kwds for ForkingPickler in __init__ and dump EDIT time.monotonic --> getattr(time,'monotonic',time.time) EDIT multiprocess/dummy: multiprocessing --> multiprocess # ---------------------------------------------------------------------- diff py3.8/multiprocess/managers.py pypy3.8/multiprocess/managers.py 1177c1177 < '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', --- > '__setitem__', 'clear', 'copy', 'get', 'items', diff py3.8/multiprocess/synchronize.py pypy3.8/multiprocess/synchronize.py 32c32 < except ImportError: --- > except (ImportError): # ---------------------------------------------------------------------- EDIT multiprocess/tests: multiprocessing --> multiprocess # ---------------------------------------------------------------------- diff py3.8/multiprocess/tests/__init__.py pypy3.8/multiprocess/tests/__init__.py 572a573 > @unittest.skipIf(True, 'bad pipe in pypy3') 615a617,618 > for i in range(3): > gc.collect() 1935a1939 > @unittest.skipIf(True, 'bad timeout in pypy3') 2655a2660,2661 > for i in range(3): > gc.collect() 2952c2958,2959 < self.assertRaises(Exception, queue.put, time.sleep) --- > # Changed on PyPy: passing functions to xmlrpc is broken > #self.assertRaises(Exception, queue.put, time.sleep) 3589c3596 < def test_heap(self): --- > def _test_heap(self): 3652a3660 > @test.support.cpython_only 4124a4133,4134 > for i in range(3): > gc.collect() 4135a4146 > import gc; gc.collect() 4141a4153 > import gc; gc.collect() 4176a4189 > @test.support.cpython_only 5030c5043 < def test_preload_resources(self): 
--- > def _test_preload_resources(self): # ---------------------------------------------------------------------- $ diff pypy3.8-v7.3.8-src/lib-python/3/test/_test_multiprocessing.py pypy3.8-v7.3.9-src/lib-python/3/test/_test_multiprocessing.py 3757a3758,3763 > def _new_shm_name(self, prefix): > # Add a PID to the name of a POSIX shared memory object to allow > # running multiprocessing tests (test_multiprocessing_fork, > # test_multiprocessing_spawn, etc) in parallel. > return prefix + str(os.getpid()) > 3759c3765,3766 < sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512) --- > name_tsmb = self._new_shm_name('test01_tsmb') > sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) 3763c3770 < self.assertEqual(sms.name, 'test01_tsmb') --- > self.assertEqual(sms.name, name_tsmb) 3772c3779 < also_sms = shared_memory.SharedMemory('test01_tsmb') --- > also_sms = shared_memory.SharedMemory(name_tsmb) 3777c3784 < same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size) --- > same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) 3787a3795,3800 > name_dblunlink = self._new_shm_name('test01_dblunlink') > sms_uno = shared_memory.SharedMemory( > name_dblunlink, > create=True, > size=5000 > ) 3789,3794d3801 < sms_uno = shared_memory.SharedMemory( < 'test01_dblunlink', < create=True, < size=5000 < ) < 3798c3805 < sms_duo = shared_memory.SharedMemory('test01_dblunlink') --- > sms_duo = shared_memory.SharedMemory(name_dblunlink) 3810c3817 < 'test01_tsmb', --- > name_tsmb, 3824c3831 < ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb') --- > ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) 4012c4019,4020 < sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate') --- > name_duplicate = self._new_shm_name('test03_duplicate') > sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) 4015c4023 < self.assertEqual('test03_duplicate', sl_copy.shm.name) --- > self.assertEqual(name_duplicate, sl_copy.shm.name) # ---------------------------------------------------------------------- diff pypy3.8-v7.3.9-src/pypy/module/_multiprocessing/interp_memory.py pypy3.9-v7.3.10-src/pypy/module/_multiprocessing/interp_memory.py 10,11c10,11 < return space.newtuple([space.newint(address), < space.newint(mmap.mmap.size)]) --- > return space.newtuple2(space.newint(address), > space.newint(mmap.mmap.size)) # ---------------------------------------------------------------------- diff pypy3.8-v7.3.9-src/pypy/module/_multiprocessing/test/test_interp_semaphore.py pypy3.8-v7.3.11-src/pypy/module/_multiprocessing/test/test_interp_semaphore.py 5d4 < from pypy.tool.pytest.objspace import gettestobjspace 15a15 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") diff pypy3.8-v7.3.9-src/pypy/module/_multiprocessing/test/test_semaphore.py pypy3.8-v7.3.11-src/pypy/module/_multiprocessing/test/test_semaphore.py 2a3 > import pytest 70a72 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 92a95 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 117a121 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 129a134 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 151a157 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 167a174 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") NOTE: semaphore_tracker throws KeyError in multiprocess and multiprocessing NOTE: semaphore_tracker throws KeyError in 
multiprocess and multiprocessing # ---------------------------------------------------------------------- Traceback (most recent call last): File "multiprocess/resource_tracker.py", line 204, in main cache[rtype].remove(name) KeyError: '/mp-v2zejd7s' uqfoundation-multiprocess-b3457a5/pypy3.8/_multiprocess/000077500000000000000000000000001455552142400234425ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/_multiprocess/__init__.py000066400000000000000000000005011455552142400255470ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE from _multiprocessing import * uqfoundation-multiprocess-b3457a5/pypy3.8/doc/000077500000000000000000000000001455552142400213175ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/doc/CHANGES.html000066400000000000000000001133431455552142400232620ustar00rootroot00000000000000 Changelog for processing

Changelog for processing

Changes in 0.52

  • On versions 0.50 and 0.51, Lock.release() on Mac OSX would fail with OSError(errno.ENOSYS, "[Errno 78] Function not implemented"). This appears to be because sem_getvalue() has not been implemented on Mac OSX.

    Now sem_getvalue() is no longer needed. Unfortunately, however, on Mac OSX BoundedSemaphore() will not raise ValueError if it exceeds its initial value.

  • Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs.

  • The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias.

Changes in 0.51

  • In 0.50 processing.Value() and processing.sharedctypes.Value() were related but had different signatures, which was rather confusing.

    Now processing.sharedctypes.Value() has been renamed processing.sharedctypes.RawValue() and processing.sharedctypes.Value() is the same as processing.Value().

  • In version 0.50 sendfd() and recvfd() apparently did not work on 64bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40.

    However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of sendfd() and recvfd() by setting macros['HAVE_FD_TRANSFER'] = 0 in setup.py.

  • Fixed an authentication error when using a "remote" manager created using BaseManager.from_address().

  • Fixed a couple of bugs which only affected Python 2.4.

Changes in 0.50

  • ctypes is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately.

  • LocalManager() has been removed.

  • Added processing.Value() and processing.Array() which are similar to LocalManager.SharedValue() and LocalManager.SharedArray().

  • In the sharedctypes module new_value() and new_array() have been renamed Value() and Array().

  • Process.stop(), Process.getStoppable() and Process.setStoppable() have been removed. Use Process.terminate() instead.

  • processing.Lock now matches threading.Lock behaviour more closely: now a thread can release a lock it does not own, and now when a thread tries to acquire a lock it already owns, a deadlock results instead of an exception.

  • On Windows when the main thread is blocking on a method of Lock, RLock, Semaphore, BoundedSemaphore, Condition it will no longer ignore Ctrl-C. (The same was already true on Unix.)

    This differs from the behaviour of the equivalent objects in threading which will completely ignore Ctrl-C.

  • The test sub-package has been replaced by lots of unit tests in a tests sub-package. Some of the old test files have been moved over to a new examples sub-package.

  • On Windows it is now possible for a non-console python program (i.e. one using pythonw.exe instead of python.exe) to use processing.

    Previously an exception was raised when subprocess.py tried to duplicate stdin, stdout, stderr.

  • Proxy objects should now be thread safe -- they now use thread local storage.

  • Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise RuntimeError with a message saying that the object should only be shared between processes using inheritance.

    Previously, this worked unreliably on Windows but would fail with an unexplained AssertionError on Unix.

  • The names of some of the macros used for compiling the extension have changed. See INSTALL.txt and setup.py.

  • A few changes which (hopefully) make compilation possible on Solaris.

  • Lots of refactoring of the code.

  • Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux).

Changes in 0.40

  • Removed SimpleQueue and PosixQueue types. Just use Queue instead.

  • Previously if you forgot to use the

    if __name__ == '__main__':
        freezeSupport()
        ...
    

    idiom on Windows then processes could be created recursively bringing the computer to its knees. Now RuntimeError will be raised instead.

  • Some refactoring of the code.

  • A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed.

Changes in 0.39

  • One can now create one-way pipes by doing reader, writer = Pipe(duplex=False).

  • Rewrote code for managing shared memory maps.

  • Added a sharedctypes module for creating ctypes objects allocated from shared memory. On Python 2.4 this requires the installation of ctypes.

    ctypes objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a LocalManager.

  • Rearranged documentation.

  • Previously the C extension caused a segfault on 64 bit machines with Python 2.5 because it used int instead of Py_ssize_t in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report.

  • A fix for Pool.terminate().

  • A fix for cleanup behaviour of Queue.

Changes in 0.38

  • Have revamped the queue types. Now the queue types are Queue, SimpleQueue and (on systems which support it) PosixQueue.

    Now Queue should behave just like Python's normal Queue.Queue class except that qsize(), task_done() and join() are not implemented. In particular, if no maximum size was specified when the queue was created then put() will always succeed without blocking.

    A SimpleQueue instance is really just a pipe protected by a couple of locks. It has get(), put() and empty() methods but does not support timeouts or non-blocking access.

    BufferedPipeQueue() and PipeQueue() remain as deprecated aliases of Queue() but BufferedPosixQueue() has been removed. (Not sure if we really need to keep PosixQueue()...)

  • Previously the Pool.shutdown() method was a little dodgy -- it could block indefinitely if map() or imap*() were used and did not try to terminate workers while they were doing a task.

    Now there are three new methods close(), terminate() and join() -- shutdown() is retained as a deprecated alias of terminate(). Thanks to Gerald John M. Manipon for feature request/suggested patch to shutdown().

  • Pool.imap() and Pool.imap_unordered() has gained a chunksize argument which allows the iterable to be submitted to the pool in chunks. Choosing chunksize appropriately makes Pool.imap() almost as fast as Pool.map() even for long iterables and cheap functions.

  • Previously on Windows, when the cleanup code for a LocalManager attempted to unlink the name of the file which backs the shared memory map, an exception was raised if a child process still existed with a handle open for that mmap. This is likely to happen if a daemon process inherits a LocalManager instance.

    Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin.

  • types.MethodType is registered with copy_reg so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.)

    This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object.

  • On Windows reduction.fromfd() now returns true instances of _socket.socket, so there is no more need for the _processing.falsesocket type.

Changes in 0.37

  • Updated metadata and documentation because the project is now hosted at developer.berlios.de/projects/pyprocessing.
  • The Pool.join() method has been removed. Pool.shutdown() will now join the worker processes automatically.
  • A pool object no longer participates in a reference cycle so Pool.shutdown() should get called as soon as its reference count falls to zero.
  • On Windows if enableLogging() was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, specifying a handler type in enableLogging() will now cause any previous handlers used by the logger to be discarded.

Changes in 0.36

  • In recent versions on Unix the finalizers in a manager process were never given a chance to run before os._exit() was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.

  • The shutting down of managers has been cleaned up.

  • In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed.

  • In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed.

  • Previously the arguments to processing.Semaphore() and processing.BoundedSemaphore() did not have any defaults. The defaults should be 1 to match threading. Fixed.

  • It should now be possible for a Windows Service created by using pywin32 to spawn processes using the processing package.

    Note that pywin32 apparently has a bug meaning that Py_Finalize() is never called when the service exits so functions registered with atexit never get a chance to run. Therefore it is advisable to explicitly call sys.exitfunc() or atexit._run_exitfuncs() at the end of ServiceFramework.DoSvcRun(). Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report.

  • Added getLogger() and enableLogging() to support logging.

Changes in 0.35

  • By default processes are no longer stoppable using the stop() method: one must call setStoppable(True) before start() in order to use the stop() method. (Note that terminate() will work regardless of whether the process is marked as being "stoppable".)

    The reason for this is that on Windows getting stop() to work involves starting a new console for the child process and installing a signal handler for the SIGBREAK signal. This unfortunately means that Ctrl-Break cannot be used to kill all processes of the program.

  • Added setStoppable() and getStoppable() methods -- see above.

  • Added BufferedQueue/BufferedPipeQueue/BufferedPosixQueue. Putting an object on a buffered queue will always succeed without blocking (just like with Queue.Queue if no maximum size is specified). This makes them potentially safer than the normal queue types provided by processing which have finite capacity and may cause deadlocks if they fill.

    test/test_worker.py has been updated to use BufferedQueue for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock.

  • Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of sem_timedwait(). This means that Condition.wait() and Queue.get() should now work with timeouts on Mac OS X.

  • Added a callback argument to Pool.apply_async().

  • Added test/test_httpserverpool.py which runs a pool of http servers which share a single listening socket.

  • Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket.

  • Fixed bug in the iterator returned by Pool.imap().

  • Fixed bug in Condition.__repr__().

  • Fixed a handle/file descriptor leak when sockets or connections are unpickled.

Changes in 0.34

  • Although in version 0.33 the C extension would compile on Mac OSX, trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the ImportError exception was silently swallowed.

    This is now fixed by using the NO_SEM_TIMED macro. Unfortunately this means that some methods like Condition.wait() and Queue.get() will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing.

  • Added a terminate() method to process objects which is more forceful than stop().

  • Fixed bug in the cleanup function registered with atexit which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. Thanks to Dominique Wahli for report and testing.

  • Added test/test_workers.py which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another.

  • Added processing.Pool() which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the map() builtin. This is still experimental and undocumented --- see test/test_pool.py for example usage.

Changes in 0.33

  • Added a recvbytes_into() method for receiving byte data into objects with the writable buffer interface. Also renamed the _recv_string() and _send_string() methods of connection objects to recvbytes() and sendbytes().

  • Some optimizations for the transferring of large blocks of data using connection objects.

  • On Unix os.sysconf() is now used by default to determine whether to compile in support for posix semaphores or posix message queues.

    By using the NO_SEM_TIMED and NO_MQ_TIMED macros (see INSTALL.txt) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions sem_timedwait() or mq_timedreceive() and mq_timesend().

  • gettimeofday() is now used instead of clock_gettime(), making compilation of the C extension (hopefully) possible on Mac OSX. No modification of setup.py should be necessary. Thanks to Michele Bertoldi for report and proposed patch.

  • cpuCount() function added which returns the number of CPUs in the system.

  • Bugfixes to PosixQueue class.

Changes in 0.32

  • Refactored and simplified _nonforking module -- info about sys.modules of parent process is no longer passed on to child process. Also pkgutil is no longer used.
  • Allocated space from an mmap used by LocalManager will now be recycled.
  • Better tests for LocalManager.
  • Fixed bug in managers.py concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report.
  • Added a freezeSupport() function. Calling this at the appropriate point in the main module is necessary when freezing a multiprocess program to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

Changes in 0.31

  • Fixed one line bug in localmanager.py which caused shared memory maps not to be resized properly.
  • Added tests for shared values/structs/arrays to test/test_processing.

Changes in 0.30

  • Process objects now support the complete API of thread objects.

    In particular isAlive(), isDaemon() and setDaemon() have been added, and join() now supports the timeout parameter.

    There are also new methods stop(), getPid() and getExitCode().

  • Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores.

  • Added support for sharing simple objects between processes by using a shared memory map and the struct or array modules.

  • An activeChildren() function has been added to processing which returns a list of the child processes which are still alive.

  • A Pipe() function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent.

  • socket objects etc are now picklable and can be transferred between processes. (Requires compilation of the _processing extension.)

  • Subclasses of managers.BaseManager no longer automatically spawn a child process when an instance is created: the start() method must be called explicitly.

  • On Windows child processes are now spawned using subprocess.

  • On Windows the Python 2.5 version of pkgutil is now used for loading modules by the _nonforking module. On Python 2.4 this version of pkgutil (which uses the standard Python licence) is included in processing.compat.

  • The arguments to the functions in processing.connection have changed slightly.

  • Connection objects now have a poll() method which tests whether there is any data available for reading.

  • The test/py2exedemo folder shows how to get py2exe to create a Windows executable from a program using the processing package.

  • More tests.

  • Bugfixes.

  • Rearrangement of various stuff.

Changes in 0.21

  • By default a proxy is now only able to access those methods of its referent which have been explicitly exposed.
  • The connection sub-package now supports digest authentication.
  • Process objects are now given randomly generated 'inheritable' authentication keys.
  • A manager process will now only accept connections from processes using the same authentication key.
  • Previously get_module() from _nonforking.py was seriously messed up (though it generally worked). It is a lot saner now.
  • Python 2.4 or higher is now required.

Changes in 0.20

  • The doc folder contains HTML documentation.
  • test is now a subpackage. Running processing.test.main() will run test scripts using both processes and threads.
  • nonforking.py has been renamed _nonforking.py. manager.py has been renamed managers.py. connection.py has become a sub-package connection.
  • Listener and Client have been removed from processing, but still exist in processing.connection.
  • The package is now probably compatible with versions of Python earlier than 2.4.
  • set is no longer a type supported by the default manager type.
  • Many more changes.

Changes in 0.12

  • Fixed bug where the arguments to processing.Manager() were passed on to processing.manager.DefaultManager() in the wrong order.
  • processing.dummy is now a subpackage of processing instead of a module.
  • Rearranged package so that the test folder, README.txt and CHANGES.txt are copied when the package is installed.

Changes in 0.11

  • Fixed bug on windows when the full path of nonforking.py contains a space.
  • On unix there is no longer a need to make the arguments to the constructor of Process picklable, or for an instance of a subclass of Process to be picklable when you call the start method.
  • On unix proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to Process. (This will never be possible on windows.)
uqfoundation-multiprocess-b3457a5/pypy3.8/doc/COPYING.html000066400000000000000000000040211455552142400233120ustar00rootroot00000000000000

Copyright (c) 2006-2008, R Oudkerk

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

uqfoundation-multiprocess-b3457a5/pypy3.8/doc/INSTALL.html000066400000000000000000000063531455552142400233220ustar00rootroot00000000000000 Installation of processing

Installation of processing

Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the ctypes package (which comes automatically with Python 2.5).

Windows binary builds for Python 2.4 and Python 2.5 are available at

http://pyprocessing.berlios.de

or

http://pypi.python.org/pypi/processing

Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:

python setup.py install

It should not be necessary to do any editing of setup.py if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the macros dictionary or libraries list. The section to modify reads

else:
    macros = dict(
        HAVE_SEM_OPEN=1,
        HAVE_SEM_TIMEDWAIT=1,
        HAVE_FD_TRANSFER=1
        )
    libraries = ['rt']

More details can be found in the comments in setup.py.

Note that if you use HAVE_SEM_OPEN=0 then support for posix semaphores will not be compiled in, and many of the functions in the processing namespace, such as Lock() or Queue(), will not be available. However, one can still create a manager using manager = processing.Manager() and then do lock = manager.Lock() etc.
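
A minimal sketch of this manager-based fallback might look like the following (the critical-section placeholder is illustrative, not part of the package):

import processing

manager = processing.Manager()    # starts a manager server process
lock = manager.Lock()             # a proxy for a lock held by the manager

lock.acquire()
try:
    pass                          # critical section goes here
finally:
    lock.release()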

Running tests

To run the test scripts using Python 2.5 do

python -m processing.tests

and on Python 2.4 do

python -c "from processing.tests import main; main()"

This will run a number of test scripts using both processes and threads.

uqfoundation-multiprocess-b3457a5/pypy3.8/doc/THANKS.html000066400000000000000000000017751455552142400232070ustar00rootroot00000000000000 Thanks

Thanks

Thanks to everyone who has offered bug reports, patches, suggestions:

Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.

Sorry if I have forgotten anyone.

uqfoundation-multiprocess-b3457a5/pypy3.8/doc/__init__.py000066400000000000000000000004001455552142400234220ustar00rootroot00000000000000import os import webbrowser def main(): ''' Show html documentation using webbrowser ''' index_html = os.path.join(os.path.dirname(__file__), 'index.html') webbrowser.open(index_html) if __name__ == '__main__': main() uqfoundation-multiprocess-b3457a5/pypy3.8/doc/connection-objects.html000066400000000000000000000152041455552142400257750ustar00rootroot00000000000000 Connection objects
Prev         Up         Next

Connection objects

Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.

Connection objects are usually created using processing.Pipe() -- see also Listener and Clients.

Connection objects have the following methods:

send(obj)

Send an object to the other end of the connection which should be read using recv().

The object must be picklable.

recv()
Return an object sent from the other end of the connection using send(). Raises EOFError if there is nothing left to receive and the other end was closed.
fileno()
Returns the file descriptor or handle used by the connection.
close()

Close the connection.

This is called automatically when the connection is garbage collected.

poll(timeout=0.0)

Return whether there is any data available to be read within timeout seconds.

If timeout is None then an infinite timeout is used.

Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C.

sendBytes(buffer)

Send byte data from an object supporting the buffer interface as a complete message.

Can be used to send strings or a view returned by buffer().

recvBytes()
Return a complete message of byte data sent from the other end of the connection as a string. Raises EOFError if there is nothing left to receive and the other end was closed.
recvBytesInto(buffer, offset=0)

Read into buffer at position offset a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises EOFError if there is nothing left to receive and the other end was closed.

buffer must be an object satisfying the writable buffer interface and offset must be non-negative and less than the length of buffer (in bytes).

If the buffer is too short then a BufferTooShort exception is raised and the complete message is available as e.args[0] where e is the exception instance.

For example:

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.sendBytes('thank you')
>>> a.recvBytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.sendBytes(arr1)
>>> count = b.recvBytesInto(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
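
The example above does not exercise poll(); a small continuation along the following lines (reusing the a and b connection objects from above) is one way it might be used:

>>> a.send('ping')
>>> b.poll(1)                    # wait up to one second for data to arrive
True
>>> b.recv()
'ping'
>>> b.poll()                     # nothing left to read, returns immediately
False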

Warning

The recv() method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message.

Therefore, unless the connection object was produced using Pipe() you should only use the recv() and send() methods after performing some sort of authentication. See Authentication keys.

Warning

If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie.

uqfoundation-multiprocess-b3457a5/pypy3.8/doc/connection-objects.txt000066400000000000000000000072761455552142400256620ustar00rootroot00000000000000.. include:: header.txt ==================== Connection objects ==================== Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects usually created using `processing.Pipe()` -- see also `Listener and Clients `_. Connection objects have the following methods: `send(obj)` Send an object to the other end of the connection which should be read using `recv()`. The object must be picklable. `recv()` Return an object sent from the other end of the connection using `send()`. Raises `EOFError` if there is nothing left to receive and the other end was closed. `fileno()` Returns the file descriptor or handle used by the connection. `close()` Close the connection. This is called automatically when the connection is garbage collected. `poll(timeout=0.0)` Return whether there is any data available to be read within `timeout` seconds. If `timeout` is `None` then an infinite timeout is used. Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C. `sendBytes(buffer)` Send byte data from an object supporting the buffer interface as a complete message. Can be used to send strings or a view returned by `buffer()`. `recvBytes()` Return a complete message of byte data sent from the other end of the connection as a string. Raises `EOFError` if there is nothing left to receive and the other end was closed. `recvBytesInto(buffer, offset=0)` Read into `buffer` at position `offset` a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises `EOFError` if there is nothing left to receive and the other end was closed. `buffer` must be an object satisfying the writable buffer interface and `offset` must be non-negative and less than the length of `buffer` (in bytes). If the buffer is too short then a `BufferTooShort` exception is raised and the complete message is available as `e.args[0]` where `e` is the exception instance. For example: >>> from processing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.sendBytes('thank you') >>> a.recvBytes() 'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.sendBytes(arr1) >>> count = b.recvBytesInto(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The `recv()` method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using `Pipe()` you should only use the `recv()` and `send()` methods after performing some sort of authentication. See `Authentication keys `_. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie. .. _Prev: queue-objects.html .. _Up: processing-ref.html .. _Next: manager-objects.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/connection-ref.html000066400000000000000000000357371455552142400251350ustar00rootroot00000000000000 Listeners and Clients
Prev         Up         Next

Listeners and Clients

Usually message passing between processes is done using queues or by using connection objects returned by Pipe().

However, the processing.connection module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for digest authentication using the hmac module from the standard library.

Classes and functions

The module defines the following functions:

Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)
Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections.
Client(address, family=None, authenticate=False, authkey=None)

Attempts to set up a connection to the listener which is using address address, returning a connection object.

The type of the connection is determined by family argument, but this can generally be omitted since it can usually be inferred from the format of address.

If authenticate is true or authkey is a string then digest authentication is used. The key used for authentication will be either authkey, or currentProcess.getAuthKey() if authkey is None. If authentication fails then AuthenticationError is raised. See Authentication keys.

The module exports two exception types:

exception AuthenticationError
Exception raised when there is an authentication error.
exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.
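
A short sketch of recovering the complete message when the supplied buffer turns out to be too small (conn is assumed to be an existing connection object):

from array import array
from processing.connection import BufferTooShort

buf = array('i', [0])              # deliberately too small for the incoming message
try:
    nbytes = conn.recvBytesInto(buf)
except BufferTooShort, e:
    message = e.args[0]            # the complete message as a byte string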

Listener objects

Instances of Listener have the following methods:

__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)
address
The address to be used by the bound socket or named pipe of the listener object.
family

The type of the socket (or named pipe) to use.

This can be one of the strings 'AF_INET' (for a TCP socket), 'AF_UNIX' (for a Unix domain socket) or 'AF_PIPE' (for a Windows named pipe). Of these only the first is guaranteed to be available.

If family is None then the family is inferred from the format of address. If address is also None then a default is chosen. This default is the family which is assumed to be the fastest available. See Address formats.

Note that if family is 'AF_UNIX' then the associated file will only be readable/writable by the user running the current process -- use os.chmod() if you need to let other users access the socket.

backlog
If the listener object uses a socket then backlog is passed to the listen() method of the socket once it has been bound.
authenticate
If authenticate is true or authkey is not None then digest authentication is used.
authkey

If authkey is a string then it will be used as the authentication key; otherwise it must be None.

If authkey is None and authenticate is true then currentProcess.getAuthKey() is used as the authentication key.

If authkey is None and authenticate is false then no authentication is done.

If authentication fails then AuthenticationError is raised. See Authentication keys.

accept()

Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then AuthenticationError is raised.

Returns a connection object.

close()

Close the bound socket or named pipe of the listener object.

This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.

Listener objects have the following read-only properties:

address
The address which is being used by the listener object.
last_accepted

The address from which the last accepted connection came.

If this is unavailable then None is returned.

Address formats

  • An 'AF_INET' address is a tuple of the form (hostname, port) where hostname is a string and port is an integer

  • An 'AF_UNIX' address is a string representing a filename on the filesystem.

  • An 'AF_PIPE' address is a string of the form r'\\.\pipe\PipeName'.

    To use Client to connect to a named pipe on a remote computer called ServerName one should use an address of the form r'\\ServerName\pipe\PipeName' instead.

Note that any string beginning with two backslashes is assumed by default to be an 'AF_PIPE' address rather than an 'AF_UNIX' address.
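
As an illustration, the family can usually be omitted and inferred from the address passed to Listener; the concrete addresses below are made up for the example:

from processing.connection import Listener

tcp_listener  = Listener(('localhost', 6000))      # tuple, so 'AF_INET' is inferred
unix_listener = Listener('/tmp/example_socket')    # plain filename, so 'AF_UNIX' (Unix only)
pipe_listener = Listener(r'\\.\pipe\ExamplePipe')  # leading backslashes, so 'AF_PIPE' (Windows only)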

Authentication keys

When one uses the recv() method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore Listener and Client use the hmac module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does not involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the return value of currentProcess().getAuthKey() is used (see Process objects). This value will automatically be inherited by any Process object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves.

Suitable authentication keys can also be generated by using os.urandom().

Example

The following server code creates a listener which uses 'secret password' as an authentication key. It then waits for a connection and sends some data to the client:

from processing.connection import Listener
from array import array

address = ('localhost', 6000)     # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')

conn = listener.accept()
print 'connection accepted from', listener.last_accepted

conn.send([2.25, None, 'junk', float])

conn.sendBytes('hello')

conn.sendBytes(array('i', [42, 1729]))

conn.close()
listener.close()

The following code connects to the server and receives some data from the server:

from processing.connection import Client
from array import array

address = ('localhost', 6000)
conn = Client(address, authkey='secret password')

print conn.recv()                 # => [2.25, None, 'junk', float]

print conn.recvBytes()            # => 'hello'

arr = array('i', [0, 0, 0, 0, 0])
print conn.recvBytesInto(arr)    # => 8
print arr                         # => array('i', [42, 1729, 0, 0, 0])

conn.close()
uqfoundation-multiprocess-b3457a5/pypy3.8/doc/connection-ref.txt000066400000000000000000000210001455552142400247620ustar00rootroot00000000000000.. include:: header.txt ======================= Listeners and Clients ======================= Usually message passing between processes is done using queues or by using connection objects returned by `Pipe()`. However, the `processing.connection` module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for *digest authentication* using the `hmac` module from the standard library. Classes and functions ===================== The module defines the following functions: `Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)` Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections. `Client(address, family=None, authenticate=False, authkey=None)` Attempts to set up a connection to the listener which is using address `address`, returning a `connection object `_. The type of the connection is determined by `family` argument, but this can generally be omitted since it can usually be inferred from the format of `address`. If `authentication` or `authkey` is a string then digest authentication is used. The key used for authentication will be either `authkey` or `currentProcess.getAuthKey()` if `authkey` is `None`. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. .. `deliverChallenge(connection, authkey)` Sends a randomly generated message to the other end of the connection and waits for a reply. If the reply matches the digest of the message using `authkey` as the key then a welcome message is sent to the other end of the connection. Otherwise `AuthenticationError` is raised. `answerChallenge(connection, authkey)` Receives a message, calculates the digest of the message using `authkey` as the key, and then sends the digest back. If a welcome message is not received then `AuthenticationError` is raised. The module exports two exception types: **exception** `AuthenticationError` Exception raised when there is an authentication error. **exception** `BufferTooShort` Exception raise by the `recvBytesInto()` method of a connection object when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Listener objects ================ Instances of `Listener` have the following methods: `__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)` `address` The address to be used by the bound socket or named pipe of the listener object. `family` The type of the socket (or named pipe) to use. This can be one of the strings `'AF_INET'` (for a TCP socket), `'AF_UNIX'` (for a Unix domain socket) or `'AF_PIPE'` (for a Windows named pipe). Of these only the first is guaranteed to be available. If `family` is `None` than the family is inferred from the format of `address`. If `address` is also `None` then a default is chosen. This default is the family which is assumed to be the fastest available. See `Address formats`_. Note that if `family` is `'AF_UNIX'` then the associated file will have only be readable/writable by the user running the current process -- use `os.chmod()` is you need to let other users access the socket. `backlog` If the listener object uses a socket then `backlog` is passed to the `listen()` method of the socket once it has been bound. 
`authenticate` If `authenticate` is true or `authkey` is not `None` then digest authentication is used. `authkey` If `authkey` is a string then it will be used as the authentication key; otherwise it must be `None`. If `authkey` is `None` and `authenticate` is true then `currentProcess.getAuthKey()` is used as the authentication key. If `authkey` is `None` and `authentication` is false then no authentication is done. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. `accept()` Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then `AuthenticationError` is raised. Returns a `connection object ` object. `close()` Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: `address` The address which is being used by the listener object. `last_accepted` The address from which the last accepted connection came. If this is unavailable then `None` is returned. Address formats =============== * An `'AF_INET'` address is a tuple of the form `(hostname, port)` where `hostname` is a string and `port` is an integer * An `'AF_UNIX'` address is a string representing a filename on the filesystem. * An `'AF_PIPE'` address is a string of the form `r'\\\\.\\pipe\\PipeName'`. To use `Client` to connect to a named pipe on a remote computer called `ServerName` one should use an address of the form `r'\\\\ServerName\\pipe\\PipeName'` instead. Note that any string beginning with two backslashes is assumed by default to be an `'AF_PIPE'` address rather than an `'AF_UNIX'` address. Authentication keys =================== When one uses the `recv()` method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore `Listener` and `Client` use the `hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does *not* involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of `currentProcess().getAuthKey()` is used (see `Process objects `_). This value will automatically inherited by any `Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between the themselves. Suitable authentication keys can also be generated by using `os.urandom()`. Example ======= The following server code creates a listener which uses `'secret password'` as an authentication key. 
It then waits for a connection and sends some data to the client:: from processing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey='secret password') conn = listener.accept() print 'connection accepted from', listener.last_accepted conn.send([2.25, None, 'junk', float]) conn.sendBytes('hello') conn.sendBytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from processing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey='secret password') print conn.recv() # => [2.25, None, 'junk', float] print conn.recvBytes() # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print conn.recvBytesInto(arr) # => 8 print arr # => array('i', [42, 1729, 0, 0, 0]) conn.close() .. _Prev: sharedctypes.html .. _Up: processing-ref.html .. _Next: programming-guidelines.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/header.txt000066400000000000000000000003401455552142400233050ustar00rootroot00000000000000.. default-role:: literal .. header:: Prev_ |spaces| Up_ |spaces| Next_ .. footer:: Prev_ |spaces| Up_ |spaces| Next_ .. |nbsp| unicode:: U+000A0 .. |spaces| replace:: |nbsp| |nbsp| |nbsp| |nbsp| uqfoundation-multiprocess-b3457a5/pypy3.8/doc/html4css1.css000066400000000000000000000126361455552142400236630ustar00rootroot00000000000000/* :Author: David Goodger :Contact: goodger@users.sourceforge.net :Date: $Date: 2008/01/29 22:14:02 $ :Revision: $Revision: 1.1.1.1 $ :Copyright: This stylesheet has been placed in the public domain. Default cascading style sheet for the HTML output of Docutils. See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to customize this style sheet. */ /* used to remove borders from tables and images */ .borderless, table.borderless td, table.borderless th { border: 0 } table.borderless td, table.borderless th { /* Override padding for "table.docutils td" with "! important". The right padding separates the table cells. */ padding: 0 0.5em 0 0 ! important } .first { /* Override more specific margin styles with "! important". */ margin-top: 0 ! important } .last, .with-subtitle { margin-bottom: 0 ! important } .hidden { display: none } a.toc-backref { text-decoration: none ; color: black } blockquote.epigraph { margin: 2em 5em ; } dl.docutils dd { margin-bottom: 0.5em } /* Uncomment (and remove this text!) to get bold-faced definition list terms dl.docutils dt { font-weight: bold } */ div.abstract { margin: 2em 5em } div.abstract p.topic-title { font-weight: bold ; text-align: center } div.admonition, div.attention, div.caution, div.danger, div.error, div.hint, div.important, div.note, div.tip, div.warning { margin: 2em ; border: medium outset ; padding: 1em } div.admonition p.admonition-title, div.hint p.admonition-title, div.important p.admonition-title, div.note p.admonition-title, div.tip p.admonition-title { font-weight: bold ; font-family: sans-serif } div.attention p.admonition-title, div.caution p.admonition-title, div.danger p.admonition-title, div.error p.admonition-title, div.warning p.admonition-title { color: red ; font-weight: bold ; font-family: sans-serif } /* Uncomment (and remove this text!) to get reduced vertical space in compound paragraphs. 
div.compound .compound-first, div.compound .compound-middle { margin-bottom: 0.5em } div.compound .compound-last, div.compound .compound-middle { margin-top: 0.5em } */ div.dedication { margin: 2em 5em ; text-align: center ; font-style: italic } div.dedication p.topic-title { font-weight: bold ; font-style: normal } div.figure { margin-left: 2em ; margin-right: 2em } div.footer, div.header { clear: both; font-size: smaller } div.line-block { display: block ; margin-top: 1em ; margin-bottom: 1em } div.line-block div.line-block { margin-top: 0 ; margin-bottom: 0 ; margin-left: 1.5em } div.sidebar { margin-left: 1em ; border: medium outset ; padding: 1em ; background-color: #ffffee ; width: 40% ; float: right ; clear: right } div.sidebar p.rubric { font-family: sans-serif ; font-size: medium } div.system-messages { margin: 5em } div.system-messages h1 { color: red } div.system-message { border: medium outset ; padding: 1em } div.system-message p.system-message-title { color: red ; font-weight: bold } div.topic { margin: 2em } h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { margin-top: 0.4em } h1.title { text-align: center } h2.subtitle { text-align: center } hr.docutils { width: 75% } img.align-left { clear: left } img.align-right { clear: right } ol.simple, ul.simple { margin-bottom: 1em } ol.arabic { list-style: decimal } ol.loweralpha { list-style: lower-alpha } ol.upperalpha { list-style: upper-alpha } ol.lowerroman { list-style: lower-roman } ol.upperroman { list-style: upper-roman } p.attribution { text-align: right ; margin-left: 50% } p.caption { font-style: italic } p.credits { font-style: italic ; font-size: smaller } p.label { white-space: nowrap } p.rubric { font-weight: bold ; font-size: larger ; color: maroon ; text-align: center } p.sidebar-title { font-family: sans-serif ; font-weight: bold ; font-size: larger } p.sidebar-subtitle { font-family: sans-serif ; font-weight: bold } p.topic-title { font-weight: bold } pre.address { margin-bottom: 0 ; margin-top: 0 ; font-family: serif ; font-size: 100% } pre.literal-block, pre.doctest-block { margin-left: 2em ; margin-right: 2em ; background-color: #eeeeee } span.classifier { font-family: sans-serif ; font-style: oblique } span.classifier-delimiter { font-family: sans-serif ; font-weight: bold } span.interpreted { font-family: sans-serif } span.option { white-space: nowrap } span.pre { white-space: pre } span.problematic { color: red } span.section-subtitle { /* font-size relative to parent (h1..h6 element) */ font-size: 80% } table.citation { border-left: solid 1px gray; margin-left: 1px } table.docinfo { margin: 2em 4em } table.docutils { margin-top: 0.5em ; margin-bottom: 0.5em } table.footnote { border-left: solid 1px black; margin-left: 1px } table.docutils td, table.docutils th, table.docinfo td, table.docinfo th { padding-left: 0.5em ; padding-right: 0.5em ; vertical-align: top } table.docutils th.field-name, table.docinfo th.docinfo-name { font-weight: bold ; text-align: left ; white-space: nowrap ; padding-left: 0 } h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { font-size: 100% } /* tt.docutils { background-color: #eeeeee } */ ul.auto-toc { list-style-type: none } uqfoundation-multiprocess-b3457a5/pypy3.8/doc/index.html000066400000000000000000000064761455552142400233310ustar00rootroot00000000000000 Documentation for processing-0.52
Prev         Up         Next
uqfoundation-multiprocess-b3457a5/pypy3.8/doc/index.txt000066400000000000000000000021751455552142400231740ustar00rootroot00000000000000.. include:: header.txt .. include:: version.txt ======================================== Documentation for processing-|version| ======================================== :Author: R Oudkerk :Contact: roudkerk at users.berlios.de :Url: http://developer.berlios.de/projects/pyprocessing :Licence: BSD Licence Contents ======== * `Introduction `_ * `Package reference `_ + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes objects `_ + `Listeners and Clients `_ * `Programming guidelines `_ * `Tests and examples `_ See also ======== * `Installation instructions `_ * `Changelog `_ * `Acknowledgments `_ * `Licence `_ .. _Next: intro.html .. _Up: index.html .. _Prev: index.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/intro.html000066400000000000000000000427461455552142400233550ustar00rootroot00000000000000 Introduction
Prev         Up         Next

Introduction

Threads, processes and the GIL

To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads.

Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient.

On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other.

CPython has a Global Interpreter Lock (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C.

One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead.

Python gives little direct support for writing programs using multiple processes. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs.

Forking and spawning

There are two ways of creating a new process in Python:

  • The current process can fork a new child process by using the os.fork() function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits copies of all variables that the parent process had.

    However, os.fork() is not available on every platform: in particular Windows does not support it.

  • Alternatively, the current process can spawn a completely new Python interpreter by using the subprocess module or one of the os.spawn*() functions.

    Getting this new interpreter into a fit state to perform the task set for it by its parent process is, however, a bit of a challenge.

The processing package uses os.fork() if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process.
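
As a rough illustration of the fork model (this sketch uses only the standard os module on a Unix system and is not part of processing), the child starts life as a copy of the parent and therefore sees the parent's variables:

import os

greeting = 'hello from the parent'

pid = os.fork()                # the child gets a copy of the parent's memory
if pid == 0:
    print greeting             # the child sees the inherited variable
    os._exit(0)                # leave the child without running cleanup code
else:
    os.waitpid(pid, 0)         # the parent waits for the child to finish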

The Process class

In the processing package processes are spawned by creating a Process object and then calling its start() method. processing.Process follows the API of threading.Thread. A trivial example of a multiprocess program is

from processing import Process

def f(name):
    print 'hello', name

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

Here the function f is run in a child process.

For an explanation of why (on Windows) the if __name__ == '__main__' part is necessary see Programming guidelines.
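
Because processing.Process follows the threading.Thread API, the same task can also be written by subclassing Process and overriding run() -- a minimal sketch (the Greeter class is purely illustrative):

from processing import Process

class Greeter(Process):
    def __init__(self, who):
        Process.__init__(self)         # call the base constructor first
        self.who = who
    def run(self):
        print 'hello', self.who        # executed in the child process

if __name__ == '__main__':
    p = Greeter('bob')
    p.start()
    p.join()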

Exchanging objects between processes

processing supports two types of communication channel between processes:

Queues:

The function Queue() returns a near clone of Queue.Queue -- see the Python standard documentation. For example

from processing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()

Queues are thread and process safe. See Queues.

Pipes:

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example

from processing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print parent_conn.recv()   # prints "[42, None, 'hello']"
    p.join()

The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See Pipes.
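
Since the pipe is duplex by default, each end can both send and receive as long as each process keeps to its own end -- a small sketch extending the example above (the echoing child is illustrative):

from processing import Process, Pipe

def echo(conn):
    msg = conn.recv()                  # the child reads from its end
    conn.send(('echo', msg))           # ...and replies on the same end
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=echo, args=(child_conn,))
    p.start()
    parent_conn.send('ping')
    print parent_conn.recv()           # prints "('echo', 'ping')"
    p.join()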

Synchronization between processes

processing contains equivalents of all the synchronization primitives from threading. For instance one can use a lock to ensure that only one process prints to standard output at a time:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()

    for num in range(10):
        Process(target=f, args=(lock, num)).start()

Without using the lock output from the different processes is liable to get all mixed up.
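
If the parent also needs to wait for the workers it can keep the Process objects and join() them -- a minor, illustrative extension of the example above:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()
    workers = [Process(target=f, args=(lock, num)) for num in range(10)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()                       # wait for every worker to finish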

Sharing state between processes

As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes.

However, if you really do need to use some shared data then processing provides a couple of ways of doing so.

Shared memory:

Data can be stored in a shared memory map using Value or Array. For example the following code

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

will print

3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

The 'd' and 'i' arguments used when creating num and arr are typecodes of the kind used by the array module: 'd' indicates a double precision float and 'i' indicates a signed integer. These shared objects will be process and thread safe.

For more flexibility in using shared memory one can use the processing.sharedctypes module which supports the creation of arbitrary ctypes objects allocated from shared memory.
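
A small sketch of what that looks like, assuming that the Value and Array factories in processing.sharedctypes accept a ctypes type in place of a typecode (see the Shared ctypes objects page for the definitive reference):

from processing import Process
from processing.sharedctypes import Value, Array
from ctypes import c_double, c_int

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value(c_double, 0.0)         # a shared ctypes double
    arr = Array(c_int, range(10))      # a shared ctypes int array

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]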

Server process:

A manager object returned by Manager() controls a server process which holds Python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue, Value and Array. For example:

from processing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    print d
    print l

will print

{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Creating managers which support other types is not hard --- see Customized managers.

Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See Server process managers.

Using a pool of workers

The Pool() function returns an object representing a pool of worker processes. It has methods which allow tasks to be offloaded to the worker processes in a few different ways.

For example:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes
    result = pool.applyAsync(f, [10])     # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow
    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

See Process pools.

Speed

The following benchmarks were performed on a single core Pentium 4, 2.5GHz laptop running Windows XP and Ubuntu Linux 6.10 --- see benchmarks.py.

Number of 256 byte string objects passed between processes/threads per sec:

Connection type Windows Linux
Queue.Queue 49,000 17,000-50,000 [1]
processing.Queue 22,000 21,000
Queue managed by server 6,900 6,500
processing.Pipe 52,000 57,000
[1] For some reason the performance of Queue.Queue is very variable on Linux.

Number of acquires/releases of a lock per sec:

Lock type Windows Linux
threading.Lock 850,000 560,000
processing.Lock 420,000 510,000
Lock managed by server 10,000 8,400
threading.RLock 93,000 76,000
processing.RLock 420,000 500,000
RLock managed by server 8,800 7,400

Number of interleaved waits/notifies per sec on a condition variable by two processes:

Condition type Windows Linux
threading.Condition 27,000 31,000
processing.Condition 26,000 25,000
Condition managed by server 6,600 6,000

Number of integers retrieved from a sequence per sec:

Sequence type Windows Linux
list 6,400,000 5,100,000
unsynchronized shared array 3,900,000 3,100,000
synchronized shared array 200,000 220,000
list managed by server 20,000 17,000
uqfoundation-multiprocess-b3457a5/pypy3.8/doc/intro.txt000066400000000000000000000301551455552142400232170ustar00rootroot00000000000000.. include:: header.txt ============== Introduction ============== Threads, processes and the GIL ============================== To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads. Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient. On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other. CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C. One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead. Python gives little direct support for writing programs using multiple process. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs. Forking and spawning ==================== There are two ways of creating a new process in Python: * The current process can *fork* a new child process by using the `os.fork()` function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits *copies* of all variables that the parent process had. However, `os.fork()` is not available on every platform: in particular Windows does not support it. * Alternatively, the current process can spawn a completely new Python interpreter by using the `subprocess` module or one of the `os.spawn*()` functions. Getting this new interpreter in to a fit state to perform the task set for it by its parent process is, however, a bit of a challenge. The `processing` package uses `os.fork()` if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process. The Process class ================= In the `processing` package processes are spawned by creating a `Process` object and then calling its `start()` method. `processing.Process` follows the API of `threading.Thread`. A trivial example of a multiprocess program is :: from processing import Process def f(name): print 'hello', name if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() Here the function `f` is run in a child process. For an explanation of why (on Windows) the `if __name__ == '__main__'` part is necessary see `Programming guidelines `_. 
Exchanging objects between processes ==================================== `processing` supports two types of communication channel between processes: **Queues**: The function `Queue()` returns a near clone of `Queue.Queue` -- see the Python standard documentation. For example :: from processing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print q.get() # prints "[42, None, 'hello']" p.join() Queues are thread and process safe. See `Queues `_. **Pipes**: The `Pipe()` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example :: from processing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print parent_conn.recv() # prints "[42, None, 'hello']" p.join() The two connection objects returned by `Pipe()` represent the two ends of the pipe. Each connection object has `send()` and `recv()` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See `Pipes `_. Synchronization between processes ================================= `processing` contains equivalents of all the synchronization primitives from `threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from processing import Process, Lock def f(l, i): l.acquire() print 'hello world', i l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes =============================== As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then `processing` provides a couple of ways of doing so. **Shared memory**: Data can be stored in a shared memory map using `Value` or `Array`. For example the following code :: from processing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print num.value print arr[:] will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The `'d'` and `'i'` arguments used when creating `num` and `arr` are typecodes of the kind used by the `array` module: `'d'` indicates a double precision float and `'i'` inidicates a signed integer. These shared objects will be process and thread safe. For more flexibility in using shared memory one can use the `processing.sharedctypes` module which supports the creation of arbitrary `ctypes objects allocated from shared memory `_. **Server process**: A manager object returned by `Manager()` controls a server process which holds python objects and allows other processes to manipulate them using proxies. A manager returned by `Manager()` will support types `list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value` and `Array`. 
For example:: from processing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print d print l will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Creating managers which support other types is not hard --- see `Customized managers `_. Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See `Server process managers `_. Using a pool of workers ======================= The `Pool()` function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" See `Process pools `_. Speed ===== The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see `benchmarks.py <../examples/benchmarks.py>`_. *Number of 256 byte string objects passed between processes/threads per sec*: ================================== ========== ================== Connection type Windows Linux ================================== ========== ================== Queue.Queue 49,000 17,000-50,000 [1]_ processing.Queue 22,000 21,000 Queue managed by server 6,900 6,500 processing.Pipe 52,000 57,000 ================================== ========== ================== .. [1] For some reason the performance of `Queue.Queue` is very variable on Linux. *Number of acquires/releases of a lock per sec*: ============================== ========== ========== Lock type Windows Linux ============================== ========== ========== threading.Lock 850,000 560,000 processing.Lock 420,000 510,000 Lock managed by server 10,000 8,400 threading.RLock 93,000 76,000 processing.RLock 420,000 500,000 RLock managed by server 8,800 7,400 ============================== ========== ========== *Number of interleaved waits/notifies per sec on a condition variable by two processes*: ============================== ========== ========== Condition type Windows Linux ============================== ========== ========== threading.Condition 27,000 31,000 processing.Condition 26,000 25,000 Condition managed by server 6,600 6,000 ============================== ========== ========== *Number of integers retrieved from a sequence per sec*: ============================== ========== ========== Sequence type Windows Linux ============================== ========== ========== list 6,400,000 5,100,000 unsynchornized shared array 3,900,000 3,100,000 synchronized shared array 200,000 220,000 list managed by server 20,000 17,000 ============================== ========== ========== .. _Prev: index.html .. _Up: index.html .. _Next: processing-ref.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/manager-objects.html000066400000000000000000000440461455552142400252560ustar00rootroot00000000000000 Manager objects
Prev         Up         Next

Manager objects

A manager object controls a server process which manages shared objects. Other processes can access the shared objects by using proxies.

Manager processes will be shut down as soon as they are garbage collected or their parent process exits. The manager classes are defined in the processing.managers module.

BaseManager

BaseManager is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects.

The public methods of BaseManager are the following:

__init__(self, address=None, authkey=None)

Creates a manager object.

Once created one should call start() or serveForever() to ensure that the manager object refers to a started manager process.

The arguments to the constructor are as follows:

address

The address on which the manager process listens for new connections. If address is None then an arbitrary one is chosen.

See Listener objects.

authkey

The authentication key which will be used to check the validity of incoming connections to the server process.

If authkey is None then currentProcess().getAuthKey() is used. Otherwise authkey is used and it must be a string.

See Authentication keys.

start()
Spawn or fork a subprocess to start the manager.
serveForever()
Start the manager in the current process. See Using a remote manager.
fromAddress(address, authkey)
A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See Using a remote manager.
shutdown()

Stop the process used by the manager. This is only available if start() has been used to start the server process.

This can be called multiple times.

BaseManager instances also have one read-only property:

address
The address used by the manager.

The creation of managers which support arbitrary types is discussed below in Customized managers.

SyncManager

SyncManager is a subclass of BaseManager which can be used for the synchronization of processes. Objects of this type are returned by processing.Manager().

It also supports creation of shared lists and dictionaries. The instance methods defined by SyncManager are

BoundedSemaphore(value=1)
Creates a shared threading.BoundedSemaphore object and returns a proxy for it.
Condition(lock=None)

Creates a shared threading.Condition object and returns a proxy for it.

If lock is supplied then it should be a proxy for a threading.Lock or threading.RLock object.

Event()
Creates a shared threading.Event object and returns a proxy for it.
Lock()
Creates a shared threading.Lock object and returns a proxy for it.
Namespace()

Creates a shared Namespace object and returns a proxy for it.

See Namespace objects.

Queue(maxsize=0)
Creates a shared Queue.Queue object and returns a proxy for it.
RLock()
Creates a shared threading.RLock object and returns a proxy for it.
Semaphore(value=1)
Creates a shared threading.Semaphore object and returns a proxy for it.
Array(typecode, sequence)
Creates an array and returns a proxy for it. (format is ignored.)
Value(typecode, value)
Creates an object with a writable value attribute and returns a proxy for it.
dict(), dict(mapping), dict(sequence)
Creates a shared dict object and returns a proxy for it.
list(), list(sequence)
Creates a shared list object and returns a proxy for it.
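
The proxies returned by these methods can be passed to child processes just like the objects from processing itself -- a small sketch, assuming the Queue and Event proxies expose the usual put()/get() and set()/wait() methods (the worker function is illustrative):

from processing import Process, Manager

def worker(q, ev):
    q.put('done')
    ev.set()

if __name__ == '__main__':
    manager = Manager()
    q = manager.Queue()
    ev = manager.Event()

    p = Process(target=worker, args=(q, ev))
    p.start()
    ev.wait()                          # wait for the child to signal
    print q.get()                      # prints "done"
    p.join()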

Namespace objects

A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes.

However, when using a proxy for a namespace object, an attribute beginning with '_' will be an attribute of the proxy and not an attribute of the referent:

>>> manager = processing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3    # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')

Customized managers

To create one's own manager one creates a subclass of BaseManager.

To create a method of the subclass which will create new shared objects one uses the following function:

CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)

Returns a function with signature func(self, *args, **kwds) which will create a shared object using the manager self and return a proxy for it.

The shared objects will be created by evaluating callable(*args, **kwds) in the manager process.

The arguments are:

callable
The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored.
proxytype

The type of proxy which will be used for objects returned by callable.

If proxytype is None then each time an object is returned by callable either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the exposed argument, see below.

exposed

Given a shared object returned by callable, the exposed argument is the list of those method names which should be exposed via BaseProxy._callMethod(). [1] [2]

If exposed is None and callable.__exposed__ exists then callable.__exposed__ is used instead.

If exposed is None and callable.__exposed__ does not exist then all methods of the shared object which do not start with '_' will be exposed.

An attempt to use BaseProxy._callMethod() with a method name which is not exposed will raise an exception.

typeid
If typeid is a string then it is used as an identifier for the callable. Otherwise, typeid must be None and a string prefixed by callable.__name__ is used as the identifier.
[1] A method here means any attribute which has a __call__ attribute.
[2]

The method names __repr__, __str__, and __cmp__ of a shared object are always exposed by the manager. However, instead of invoking the __repr__(), __str__(), __cmp__() instance methods (none of which are guaranteed to exist) they invoke the builtin functions repr(), str() and cmp().

Note that one should generally avoid exposing rich comparison methods like __eq__(), __ne__(), __le__(). To make the proxy type support comparison by value one can just expose __cmp__() instead (even if the referent does not have such a method).

Example

from processing.managers import BaseManager, CreatorMethod

class FooClass(object):
    def bar(self):
        print 'BAR'
    def baz(self):
        print 'BAZ'

class NewManager(BaseManager):
    Foo = CreatorMethod(FooClass)

if __name__ == '__main__':
    manager = NewManager()
    manager.start()
    foo = manager.Foo()
    foo.bar()               # prints 'BAR'
    foo.baz()               # prints 'BAZ'
    manager.shutdown()

See ex_newtype.py for more examples.

Using a remote manager

It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it).

Running the following commands creates a server for a shared queue which remote clients can use:

>>> from processing.managers import BaseManager, CreatorMethod
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy')
...
>>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none')
>>> m.serveForever()

One client can access the server as follows:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.put('hello')

Another client can also use it:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.get()
'hello'
uqfoundation-multiprocess-b3457a5/pypy3.8/doc/manager-objects.txt000066400000000000000000000235161455552142400251300ustar00rootroot00000000000000.. include:: header.txt ================= Manager objects ================= A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the `processing.managers` module. BaseManager =========== `BaseManager` is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects. The public methods of `BaseManager` are the following: `__init__(self, address=None, authkey=None)` Creates a manager object. Once created one should call `start()` or `serveForever()` to ensure that the manager object refers to a started manager process. The arguments to the constructor are as follows: `address` The address on which the manager process listens for new connections. If `address` is `None` then an arbitrary one is chosen. See `Listener objects `_. `authkey` The authentication key which will be used to check the validity of incoming connections to the server process. If `authkey` is `None` then `currentProcess().getAuthKey()`. Otherwise `authkey` is used and it must be a string. See `Authentication keys `_. `start()` Spawn or fork a subprocess to start the manager. `serveForever()` Start the manager in the current process. See `Using a remote manager`_. `fromAddress(address, authkey)` A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See `Using a remote manager`_. `shutdown()` Stop the process used by the manager. This is only available if `start()` has been used to start the server process. This can be called multiple times. `BaseManager` instances also have one read-only property: `address` The address used by the manager. The creation of managers which support arbitrary types is discussed below in `Customized managers`_. SyncManager =========== `SyncManager` is a subclass of `BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by `processing.Manager()`. It also supports creation of shared lists and dictionaries. The instance methods defined by `SyncManager` are `BoundedSemaphore(value=1)` Creates a shared `threading.BoundedSemaphore` object and returns a proxy for it. `Condition(lock=None)` Creates a shared `threading.Condition` object and returns a proxy for it. If `lock` is supplied then it should be a proxy for a `threading.Lock` or `threading.RLock` object. `Event()` Creates a shared `threading.Event` object and returns a proxy for it. `Lock()` Creates a shared `threading.Lock` object and returns a proxy for it. `Namespace()` Creates a shared `Namespace` object and returns a proxy for it. See `Namespace objects`_. `Queue(maxsize=0)` Creates a shared `Queue.Queue` object and returns a proxy for it. `RLock()` Creates a shared `threading.RLock` object and returns a proxy for it. `Semaphore(value=1)` Creates a shared `threading.Semaphore` object and returns a proxy for it. `Array(typecode, sequence)` Create an array and returns a proxy for it. (`format` is ignored.) `Value(typecode, value)` Create an object with a writable `value` attribute and returns a proxy for it. 
`dict()`, `dict(mapping)`, `dict(sequence)` Creates a shared `dict` object and returns a proxy for it. `list()`, `list(sequence)` Creates a shared `list` object and returns a proxy for it. Namespace objects ----------------- A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with `'_'` will be an attribute of the proxy and not an attribute of the referent:: >>> manager = processing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print Global Namespace(x=10, y='hello') Customized managers =================== To create one's own manager one creates a subclass of `BaseManager`. To create a method of the subclass which will create new shared objects one uses the following function: `CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)` Returns a function with signature `func(self, *args, **kwds)` which will create a shared object using the manager `self` and return a proxy for it. The shared objects will be created by evaluating `callable(*args, **kwds)` in the manager process. The arguments are: `callable` The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored. `proxytype` The type of proxy which will be used for object returned by `callable`. If `proxytype` is `None` then each time an object is returned by `callable` either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the `exposed` argument, see below. `exposed` Given a shared object returned by `callable`, the `exposed` argument is the list of those method names which should be exposed via |callmethod|_. [#]_ [#]_ If `exposed` is `None` and `callable.__exposed__` exists then `callable.__exposed__` is used instead. If `exposed` is `None` and `callable.__exposed__` does not exist then all methods of the shared object which do not start with `'_'` will be exposed. An attempt to use |callmethod| with a method name which is not exposed will raise an exception. `typeid` If `typeid` is a string then it is used as an identifier for the callable. Otherwise, `typeid` must be `None` and a string prefixed by `callable.__name__` is used as the identifier. .. |callmethod| replace:: ``BaseProxy._callMethod()`` .. _callmethod: proxy-objects.html#methods-of-baseproxy .. [#] A method here means any attribute which has a `__call__` attribute. .. [#] The method names `__repr__`, `__str__`, and `__cmp__` of a shared object are always exposed by the manager. However, instead of invoking the `__repr__()`, `__str__()`, `__cmp__()` instance methods (none of which are guaranteed to exist) they invoke the builtin functions `repr()`, `str()` and `cmp()`. Note that one should generally avoid exposing rich comparison methods like `__eq__()`, `__ne__()`, `__le__()`. To make the proxy type support comparison by value one can just expose `__cmp__()` instead (even if the referent does not have such a method). 
Example ------- :: from processing.managers import BaseManager, CreatorMethod class FooClass(object): def bar(self): print 'BAR' def baz(self): print 'BAZ' class NewManager(BaseManager): Foo = CreatorMethod(FooClass) if __name__ == '__main__': manager = NewManager() manager.start() foo = manager.Foo() foo.bar() # prints 'BAR' foo.baz() # prints 'BAZ' manager.shutdown() See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples. Using a remote manager ====================== It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a shared queue which remote clients can use:: >>> from processing.managers import BaseManager, CreatorMethod >>> import Queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy') ... >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none') >>> m.serveForever() One client can access the server as follows:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.put('hello') Another client can also use it:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.get() 'hello' .. _Prev: connection-objects.html .. _Up: processing-ref.html .. _Next: proxy-objects.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/pool-objects.html000066400000000000000000000265511455552142400246160ustar00rootroot00000000000000 Process Pools
Prev         Up         Next

Process Pools

The processing.pool module has one public class:

class Pool(processes=None, initializer=None, initargs=())

A class representing a pool of worker processes.

Tasks can be offloaded to the pool and the results dealt with when they become available.

Note that tasks can only be submitted (or retrieved) by the process which created the pool object.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

Pool objects

Pool has the following public methods:

__init__(processes=None)
The constructor creates and starts processes worker processes. If processes is None then cpuCount() is used to find a default, or 1 is used if cpuCount() raises NotImplemented.
apply(func, args=(), kwds={})
Equivalent of the apply() builtin function. It blocks till the result is ready.
applyAsync(func, args=(), kwds={}, callback=None)

A variant of the apply() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

map(func, iterable, chunksize=None)

A parallel equivalent of the map() builtin function. It blocks till the result is ready.

This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.

mapAsync(func, iterable, chunksize=None, callback=None)

A variant of the map() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

imap(func, iterable, chunksize=1)

An equivalent of itertools.imap().

The chunksize argument is the same as the one used by the map() method. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1.

Also if chunksize is 1 then the next() method of the iterator returned by the imap() method has an optional timeout parameter: next(timeout) will raise processing.TimeoutError if the result cannot be returned within timeout seconds.

imapUnordered(func, iterable, chunksize=1)
The same as imap() except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".)
close()
Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
terminate()
Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected terminate() will be called immediately.
join()
Wait for the worker processes to exit. One must call close() or terminate() before using join().
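
A short sketch of the normal shutdown sequence using the methods above (the squaring function is illustrative):

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=2)
    print pool.map(f, range(5))        # prints "[0, 1, 4, 9, 16]"
    pool.close()                       # no more tasks may be submitted
    pool.join()                        # wait for the worker processes to exit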

Asynchronous result objects

The result objects returned by applyAsync() and mapAsync() have the following public methods:

get(timeout=None)
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then processing.TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
wait(timeout=None)
Waits until the result is available or until timeout seconds pass.
ready()
Returns whether the call has completed.
successful()
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.

Examples

The following example demonstrates the use of a pool:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes

    result = pool.applyAsync(f, (10,))    # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow

    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

    it = pool.imap(f, range(10))
    print it.next()                       # prints "0"
    print it.next()                       # prints "1"
    print it.next(timeout=1)              # prints "4" unless your computer is *very* slow

    import time
    result = pool.applyAsync(time.sleep, (10,))
    print result.get(timeout=1)           # raises `TimeoutError`

See also ex_pool.py.
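
The status methods wait(), ready() and successful() and the callback argument of applyAsync() can be combined along the following lines -- an illustrative sketch, not taken from ex_pool.py:

from processing import Pool

def f(x):
    return x*x

def report(value):
    print 'callback got', value        # runs in the thread which handles results

if __name__ == '__main__':
    pool = Pool(processes=2)
    result = pool.applyAsync(f, (7,), callback=report)
    result.wait()                      # block until the result is available
    print result.ready()               # prints "True"
    print result.successful()          # prints "True" since f(7) raised nothing
    print result.get()                 # prints "49"
    pool.close()
    pool.join()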

uqfoundation-multiprocess-b3457a5/pypy3.8/doc/pool-objects.txt000066400000000000000000000136411455552142400244650ustar00rootroot00000000000000.. include:: header.txt =============== Process Pools =============== The `processing.pool` module has one public class: **class** `Pool(processes=None, initializer=None, initargs=())` A class representing a pool of worker processes. Tasks can be offloaded to the pool and the results dealt with when they become available. Note that tasks can only be submitted (or retrieved) by the process which created the pool object. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. Pool objects ============ `Pool` has the following public methods: `__init__(processes=None)` The constructor creates and starts `processes` worker processes. If `processes` is `None` then `cpuCount()` is used to find a default or 1 if `cpuCount()` raises `NotImplemented`. `apply(func, args=(), kwds={})` Equivalent of the `apply()` builtin function. It blocks till the result is ready. `applyAsync(func, args=(), kwds={}, callback=None)` A variant of the `apply()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `map(func, iterable, chunksize=None)` A parallel equivalent of the `map()` builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting `chunksize` to a positive integer. `mapAsync(func, iterable, chunksize=None, callback=None)` A variant of the `map()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `imap(func, iterable, chunksize=1)` An equivalent of `itertools.imap()`. The `chunksize` argument is the same as the one used by the `map()` method. For very long iterables using a large value for `chunksize` can make make the job complete **much** faster than using the default value of `1`. Also if `chunksize` is `1` then the `next()` method of the iterator returned by the `imap()` method has an optional `timeout` parameter: `next(timeout)` will raise `processing.TimeoutError` if the result cannot be returned within `timeout` seconds. `imapUnordered(func, iterable, chunksize=1)` The same as `imap()` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) `close()` Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. `terminate()` Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected `terminate()` will be called immediately. 
`join()` Wait for the worker processes to exit. One must call `close()` or `terminate()` before using `join()`. Asynchronous result objects =========================== The result objects returns by `applyAsync()` and `mapAsync()` have the following public methods: `get(timeout=None)` Returns the result when it arrives. If `timeout` is not `None` and the result does not arrive within `timeout` seconds then `processing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by `get()`. `wait(timeout=None)` Waits until the result is available or until `timeout` seconds pass. `ready()` Returns whether the call has completed. `successful()` Returns whether the call completed without raising an exception. Will raise `AssertionError` if the result is not ready. Examples ======== The following example demonstrates the use of a pool:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, (10,)) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print it.next() # prints "0" print it.next() # prints "1" print it.next(timeout=1) # prints "4" unless your computer is *very* slow import time result = pool.applyAsync(time.sleep, (10,)) print result.get(timeout=1) # raises `TimeoutError` See also `ex_pool.py <../examples/ex_pool.py>`_. .. _Prev: proxy-objects.html .. _Up: processing-ref.html .. _Next: sharedctypes.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/process-objects.html000066400000000000000000000235741455552142400253250ustar00rootroot00000000000000 Process objects
Prev         Up         Next

Process objects

Process objects represent activity that is run in a separate process.

Process

The Process class has equivalents of all the methods of threading.Thread:

__init__(group=None, target=None, name=None, args=(), kwargs={})

This constructor should always be called with keyword arguments. Arguments are:

group
should be None; exists for compatibility with threading.Thread.
target
is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name
is the process name. By default, a unique name is constructed of the form 'Process-N1:N2:...:Nk' where N1,N2,...,Nk is a sequence of integers whose length is determined by the generation of the process.
args
is the argument tuple for the target invocation. Defaults to ().
kwargs
is a dictionary of keyword arguments for the target invocation. Defaults to {}.

If a subclass overrides the constructor, it must make sure it invokes the base class constructor (Process.__init__()) before doing anything else to the process.

run()

Method representing the process's activity.

You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.

start()

Start the process's activity.

This must be called at most once per process object. It arranges for the object's run() method to be invoked in a separate process.

join(timeout=None)

This blocks the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.

If timeout is None then there is no timeout.

A process can be joined many times.

A process cannot join itself because this would cause a deadlock.

It is an error to attempt to join a process before it has been started.

getName()
Return the process's name.
setName(name)

Set the process's name.

The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor.

isAlive()

Return whether the process is alive.

Roughly, a process object is alive from the moment the start() method returns until the child process terminates.

isDaemon()
Return the process's daemon flag.
setDaemon(daemonic)

Set the process's daemon flag to the Boolean value daemonic. This must be called before start() is called.

The initial value is inherited from the creating process.

When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes.

In addition process objects also support the following methods.

getPid()
Return the process ID. Before the process is spawned this will be None.
getExitCode()
Return the child's exit code. This will be None if the process has not yet terminated. A negative value -N indicates that the child was terminated by signal N.
getAuthKey()

Return the process's authentication key (a string).

When the processing package is initialized the main process is assigned a random hexadecimal string.

When a Process object is created it will inherit the authentication key of its parent process, although this may be changed using setAuthKey() below.

See Authentication Keys.

setAuthKey(authkey)
Set the process's authentication key which must be a string.
terminate()

Terminate the process. On Unix this is done using the SIGTERM signal and on Windows TerminateProcess() is used. Note that exit handlers and finally clauses etc. will not be executed. Also note that descendants of the process will not be terminated.

Warning

If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock.

Note that the start(), join(), isAlive() and getExitCode() methods should only be called by the process that created the process object.

Example

Example usage of some of the methods of Process:

>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
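
As noted under __init__() above, a subclass which overrides the constructor must invoke Process.__init__() before doing anything else. The following is a minimal sketch of such a subclass, written in the same Python 2 style as the session above; the Worker class and its count argument are purely illustrative:

from processing import Process

class Worker(Process):
    def __init__(self, count):
        Process.__init__(self)          # invoke the base constructor first
        self.count = count
    def run(self):
        # overriding run() replaces the default behaviour of calling `target`
        print 'counting to', self.count

if __name__ == '__main__':
    w = Worker(10)
    w.start()
    w.join()
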
uqfoundation-multiprocess-b3457a5/pypy3.8/doc/process-objects.txt000066400000000000000000000136131455552142400251710ustar00rootroot00000000000000.. include:: header.txt ================= Process objects ================= Process objects represent activity that is run in a separate process. Process ======= The `Process` class has equivalents of all the methods of `threading.Thread`: `__init__(group=None, target=None, name=None, args=(), kwargs={})` This constructor should always be called with keyword arguments. Arguments are: `group` should be `None`; exists for compatibility with `threading.Thread`. `target` is the callable object to be invoked by the `run()` method. Defaults to None, meaning nothing is called. `name` is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. `args` is the argument tuple for the target invocation. Defaults to `()`. `kwargs` is a dictionary of keyword arguments for the target invocation. Defaults to `{}`. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (`Process.__init__()`) before doing anything else to the process. `run()` Method representing the process's activity. You may override this method in a subclass. The standard `run()` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the `args` and `kwargs` arguments, respectively. `start()` Start the process's activity. This must be called at most once per process object. It arranges for the object's `run()` method to be invoked in a separate process. `join(timeout=None)` This blocks the calling thread until the process whose `join()` method is called terminates or until the optional timeout occurs. If `timeout` is `None` then there is no timeout. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. `getName()` Return the process's name. `setName(name)` Set the process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. `isAlive()` Return whether the process is alive. Roughly, a process object is alive from the moment the `start()` method returns until the child process terminates. `isDaemon()` Return the process's daemon flag. `setDaemon(daemonic)` Set the process's daemon flag to the Boolean value `daemonic`. This must be called before `start()` is called. The initial value is inherited from the creating process. When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes. In addition process objects also support the following methods. `getPid()` Return the process ID. Before the process is spawned this will be `None`. `getExitCode()` Return the child's exit code. This will be `None` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. `getAuthKey()` Return the process's authentication key (a string). When the `processing` package is initialized the main process is assigned a random hexadecimal string. 
When a `Process` object is created it will inherit the authentication key of its parent process, although this may be changed using `setAuthKey()` below. See `Authentication Keys `_. `setAuthKey(authkey)` Set the process's authentication key which must be a string. `terminate()` Terminate the process. On Unix this is done using the `SIGTERM` signal and on Windows `TerminateProcess()` is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will *not* be terminates. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the `start()`, `join()`, `isAlive()` and `getExitCode()` methods should only be called by the process that created the process object. Example ======= Example usage of some of the methods of `Process`:: >>> import processing, time, signal >>> p = processing.Process(target=time.sleep, args=(1000,)) >>> print p, p.isAlive() False >>> p.start() >>> print p, p.isAlive() True >>> p.terminate() >>> print p, p.isAlive() False >>> p.getExitCode() == -signal.SIGTERM True .. _Prev: processing-ref.html .. _Up: processing-ref.html .. _Next: queue-objects.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/processing-ref.html000066400000000000000000000573611455552142400251470ustar00rootroot00000000000000 processing package reference

processing package reference

The processing package mostly replicates the API of the threading module.

Classes and exceptions

class Process(group=None, target=None, name=None, args=(), kwargs={})

An analogue of threading.Thread.

See Process objects.

exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Pipes and Queues

When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks.

For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers).

Note that one can also create a shared queue by using a manager object -- see Managers.

For an example of the usage of queues for interprocess communication see ex_workers.py.

Pipe(duplex=True)

Returns a pair (conn1, conn2) of connection objects representing the ends of a pipe.

If duplex is true then the pipe is two way; otherwise conn1 can only be used for receiving messages and conn2 can only be used for sending messages.

See Connection objects.

Queue(maxsize=0)

Returns a process shared queue object. The usual Empty and Full exceptions from the standard library's Queue module are raised to signal timeouts.

See Queue objects.
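
A minimal sketch of both mechanisms, in the Python 2 style used by the other examples in these pages, is shown below; it assumes the send() and recv() methods of the connection objects returned by Pipe(), as described in the Connection objects section:

from processing import Process, Pipe, Queue

def pipe_worker(conn):
    conn.send('hello over the pipe')

def queue_worker(q):
    q.put('hello over the queue')

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    Process(target=pipe_worker, args=(child_conn,)).start()
    print parent_conn.recv()            # prints 'hello over the pipe'

    q = Queue()
    Process(target=queue_worker, args=(q,)).start()
    print q.get()                       # prints 'hello over the queue'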

Synchronization primitives

Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the standard library's threading module.

Note that one can also create synchronization primitives by using a manager object -- see Managers.

BoundedSemaphore(value=1)

Returns a bounded semaphore object: a clone of threading.BoundedSemaphore.

(On Mac OSX this is indistinguishable from Semaphore() because sem_getvalue() is not implemented on that platform).

Condition(lock=None)

Returns a condition variable: a clone of threading.Condition.

If lock is specified then it should be a Lock or RLock object from processing.

Event()
Returns an event object: a clone of threading.Event.
Lock()
Returns a non-recursive lock object: a clone of threading.Lock.
RLock()
Returns a recursive lock object: a clone of threading.RLock.
Semaphore(value=1)
Returns a semaphore object: a clone of threading.Semaphore.

Acquiring with a timeout

The acquire() method of BoundedSemaphore, Lock, RLock and Semaphore has a timeout parameter not supported by the equivalents in threading. The signature is acquire(block=True, timeout=None) with keyword parameters being acceptable. If block is true and timeout is not None then it specifies a timeout in seconds. If block is false then timeout is ignored.

Interrupting the main thread

If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to BoundedSemaphore.acquire(), Lock.acquire(), RLock.acquire(), Semaphore.acquire(), Condition.acquire() or Condition.wait() then the call will be immediately interrupted and KeyboardInterrupt will be raised.

This differs from the behaviour of threading where SIGINT will be ignored while the equivalent blocking calls are in progress.
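
The sketch below exercises the timeout form of acquire() described above; it assumes that, as with threading, acquire() returns a true value when the lock was actually obtained:

from processing import Process, Lock

def f(lock, i):
    # wait at most five seconds for the lock (timeout is a processing extension)
    if lock.acquire(block=True, timeout=5):
        try:
            print 'process %d acquired the lock' % i
        finally:
            lock.release()

if __name__ == '__main__':
    lock = Lock()
    for i in range(3):
        Process(target=f, args=(lock, i)).start()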

Shared Objects

It is possible to create shared objects using shared memory which can be inherited by child processes.

Value(typecode_or_type, *args, **, lock=True)

Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Array(typecode_or_type, size_or_initializer, **, lock=True)

Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library.

See also sharedctypes.
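
A minimal sketch of Value() and Array() in use; it assumes that the synchronized wrappers forward attribute access and indexing to the underlying ctypes objects, so the illustrative names num and arr below behave like a shared double and a shared int array:

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.14159               # assumes the wrapper exposes `.value`
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)           # shared double, protected by a lock by default
    arr = Array('i', range(10))     # shared int array initialized from a sequence
    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()
    print num.value                 # prints 3.14159
    print arr[:]                    # prints [0, -1, -2, ..., -9]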

Managers

Managers provide a way to create data which can be shared between different processes.

Manager()

Returns a started SyncManager object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies.

The methods for creating shared objects are

list(), dict(), Namespace(), Value(), Array(), Lock(), RLock(), Semaphore(), BoundedSemaphore(), Condition(), Event(), Queue().

See SyncManager.

It is possible to create managers which support other types -- see Customized managers.
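
For example, the dict() and list() methods return proxies that child processes can modify in place (a minimal sketch in the Python 2 style used elsewhere in these pages):

from processing import Process, Manager

def f(d, l):
    d['count'] = 10
    l.reverse()

if __name__ == '__main__':
    manager = Manager()
    d = manager.dict()
    l = manager.list(range(5))
    p = Process(target=f, args=(d, l))
    p.start()
    p.join()
    print d                         # prints {'count': 10}
    print l                         # prints [4, 3, 2, 1, 0]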

Process Pools

One can create a pool of processes which will carry out tasks submitted to it.

Pool(processes=None, initializer=None, initargs=())

Returns a process pool object which controls a pool of worker processes to which jobs can be submitted.

It supports asynchronous results with timeouts and callbacks and has a parallel map implementation.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

See Pool objects.
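
A short sketch of a pool using the initializer/initargs hook described above (the setup() and square() functions are illustrative only):

from processing import Pool

def setup(label):
    print 'worker starting:', label

def square(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=2, initializer=setup, initargs=('demo',))
    print pool.map(square, range(5))    # prints [0, 1, 4, 9, 16]
    pool.close()
    pool.join()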

Logging

Some support for logging is available. Note, however, that the logging package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up.

enableLogging(level, HandlerType=None, handlerArgs=(), format=None)

Enables logging and sets the debug level used by the package's logger to level. See documentation for the logging module in the standard library.

If HandlerType is specified then a handler is created using HandlerType(*handlerArgs) and this will be used by the logger -- any previous handlers will be discarded. If format is specified then this will be used for the handler; otherwise format defaults to '[%(levelname)s/%(processName)s] %(message)s'. (The logger used by processing allows use of the non-standard '%(processName)s' format.)

If HandlerType is not specified and the logger has no handlers then a default one is created which prints to sys.stderr.

Note: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call enableLogging() with the same arguments which were used when its parent process last called enableLogging() (if it ever did).

getLogger()
Returns the logger used by processing. If enableLogging() has not yet been called then None is returned.

Below is an example session with logging turned on:

>>> import processing, logging
>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] manager exiting with exitcode 0

Miscellaneous

activeChildren()

Return list of all live children of the current process.

Calling this has the side effect of "joining" any processes which have already finished.

cpuCount()
Returns the number of CPUs in the system. May raise NotImplementedError.
currentProcess()

An analogue of threading.current_thread().

Returns the object corresponding to the current process.

freezeSupport()

Adds support for when a program which uses the processing package has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example

from processing import Process, freezeSupport

def f():
    print 'hello world!'

if __name__ == '__main__':
    freezeSupport()
    Process(target=f).start()

If the freezeSupport() line is missed out then trying to run the frozen executable will raise RuntimeError.

If the module is being run normally by the python interpreter then freezeSupport() has no effect.

Note

  • The processing.dummy package replicates the API of processing but is no more than a wrapper around the threading module.
  • processing contains no analogues of activeCount, enumerate, settrace, setprofile, Timer, or local from the threading module.
uqfoundation-multiprocess-b3457a5/pypy3.8/doc/processing-ref.txt000066400000000000000000000310141455552142400250050ustar00rootroot00000000000000.. include:: header.txt ============================== processing package reference ============================== The `processing` package mostly replicates the API of the `threading` module. Classes and exceptions ---------------------- **class** `Process(group=None, target=None, name=None, args=(), kwargs={})` An analogue of `threading.Thread`. See `Process objects`_. **exception** `BufferTooShort` Exception raised by the `recvBytesInto()` method of a `connection object `_ when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Pipes and Queues ---------------- When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers). Note that one can also create a shared queue by using a manager object -- see `Managers`_. For an example of the usage of queues for interprocess communication see `ex_workers.py <../examples/ex_workers.py>`_. `Pipe(duplex=True)` Returns a pair `(conn1, conn2)` of connection objects representing the ends of a pipe. If `duplex` is true then the pipe is two way; otherwise `conn1` can only be used for receiving messages and `conn2` can only be used for sending messages. See `Connection objects `_. `Queue(maxsize=0)` Returns a process shared queue object. The usual `Empty` and `Full` exceptions from the standard library's `Queue` module are raised to signal timeouts. See `Queue objects `_. Synchronization primitives -------------------------- Generally synchronization primitives are not as necessary in a multiprocess program as they are in a mulithreaded program. See the documentation for the standard library's `threading` module. Note that one can also create synchronization primitves by using a manager object -- see `Managers`_. `BoundedSemaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.BoundedSemaphore`. (On Mac OSX this is indistiguishable from `Semaphore()` because `sem_getvalue()` is not implemented on that platform). `Condition(lock=None)` Returns a condition variable: a clone of `threading.Condition`. If `lock` is specified then it should be a `Lock` or `RLock` object from `processing`. `Event()` Returns an event object: a clone of `threading.Event`. `Lock()` Returns a non-recursive lock object: a clone of `threading.Lock`. `RLock()` Returns a recursive lock object: a clone of `threading.RLock`. `Semaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.Semaphore`. .. admonition:: Acquiring with a timeout The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and `Semaphore` has a timeout parameter not supported by the equivalents in `threading`. The signature is `acquire(block=True, timeout=None)` with keyword parameters being acceptable. If `block` is true and `timeout` is not `None` then it specifies a timeout in seconds. If `block` is false then `timeout` is ignored. .. 
admonition:: Interrupting the main thread If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to `BoundedSemaphore.acquire()`, `Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()`, `Condition.acquire()` or `Condition.wait()` then the call will be immediately interrupted and `KeyboardInterrupt` will be raised. This differs from the behaviour of `threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared Objects -------------- It is possible to create shared objects using shared memory which can be inherited by child processes. `Value(typecode_or_type, *args, **, lock=True)` Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Array(typecode_or_type, size_or_initializer, **, lock=True)` Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library. See also `sharedctypes `_. Managers -------- Managers provide a way to create data which can be shared between different processes. `Manager()` Returns a started `SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. The methods for creating shared objects are `list()`, `dict()`, `Namespace()`, `Value()`, `Array()`, `Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`. See `SyncManager `_. It is possible to create managers which support other types -- see `Customized managers `_. Process Pools ------------- One can create a pool of processes which will carry out tasks submitted to it. 
`Pool(processes=None, initializer=None, initargs=())` Returns a process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. See `Pool objects `_. Logging ------- Some support for logging is available. Note, however, that the `logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. `enableLogging(level, HandlerType=None, handlerArgs=(), format=None)` Enables logging and sets the debug level used by the package's logger to `level`. See documentation for the `logging` module in the standard library. If `HandlerType` is specified then a handler is created using `HandlerType(*handlerArgs)` and this will be used by the logger -- any previous handlers will be discarded. If `format` is specified then this will be used for the handler; otherwise `format` defaults to `'[%(levelname)s/%(processName)s] %(message)s'`. (The logger used by `processing` allows use of the non-standard `'%(processName)s'` format.) If `HandlerType` is not specified and the logger has no handlers then a default one is created which prints to `sys.stderr`. *Note*: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call `enableLogging()` with the same arguments which were used when its parent process last called `enableLogging()` (if it ever did). `getLogger()` Returns the logger used by `processing`. If `enableLogging()` has not yet been called then `None` is returned. Below is an example session with logging turned on:: >>> import processing, logging >>> processing.enableLogging(level=logging.INFO) >>> processing.getLogger().warning('doomed') [WARNING/MainProcess] doomed >>> m = processing.Manager() [INFO/SyncManager-1] child process calling self.run() [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-1] manager received shutdown message [INFO/SyncManager-1] manager exiting with exitcode 0 Miscellaneous ------------- `activeChildren()` Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. `cpuCount()` Returns the number of CPUs in the system. May raise `NotImplementedError`. `currentProcess()` An analogue of `threading.current_thread()`. Returns the object corresponding to the current process. `freezeSupport()` Adds support for when a program which uses the `processing` package has been frozen to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) One needs to call this function straight after the `if __name__ == '__main__'` line of the main module. For example :: from processing import Process, freezeSupport def f(): print 'hello world!' if __name__ == '__main__': freezeSupport() Process(target=f).start() If the `freezeSupport()` line is missed out then trying to run the frozen executable will raise `RuntimeError`. If the module is being run normally by the python interpreter then `freezeSupport()` has no effect. .. 
note:: * The `processing.dummy` package replicates the API of `processing` but is no more than a wrapper around the `threading` module. * `processing` contains no analogues of `activeCount`, `enumerate`, `settrace`, `setprofile`, `Timer`, or `local` from the `threading` module. Subsections ----------- + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes object `_ + `Listeners and Clients `_ .. _Prev: intro.html .. _Up: index.html .. _Next: process-objects.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/programming-guidelines.html000066400000000000000000000214551455552142400266640ustar00rootroot00000000000000 Programming guidelines

Programming guidelines

There are certain guidelines and idioms which should be adhered to when using the processing package.

All platforms

Avoid shared state

As far as possible one should try to avoid shifting large amounts of data between processes.

It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the threading module.

Picklability:
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies:

Do not use a proxy object from more than one thread unless you protect it with a lock.

(There is never a problem with different processes using the 'same' proxy.)

Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or activeChildren() is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's isAlive() will join the process. Even so it is probably good practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many of the types from the processing package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes

Using the terminate() method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes.

Therefore it is probably best to only consider using terminate() on processes which never use any shared resources.

Joining processes that use queues

Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the cancelJoin() method of the queue to avoid this behaviour.)

This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will automatically be joined.

An example which will deadlock is the following:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # this deadlocks
    obj = queue.get()

A fix here would be to swap the last two lines round (or simply remove the p.join() line).
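
With the last two lines swapped as suggested, the queue is drained before the join and the example no longer deadlocks:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    obj = queue.get()           # drain the queue first ...
    p.join()                    # ... so the join can complete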

Explicitly pass resources to child processes

On Unix a child process can make use of a shared resource created in a parent process by relying on a global variable. However, it is better to pass the object as an argument to the constructor for the child process.

Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process.

So for instance

from processing import Process, Lock

def f():
    ... do something using "lock" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f).start()

should be rewritten as

from processing import Process, Lock

def f(l):
    ... do something using "l" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f, args=(lock,)).start()

Windows

Since Windows lacks os.fork() it has a few extra restrictions:

More picklability:

Ensure that all arguments to Process.__init__() are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the target argument on Windows --- just define a function and use that instead.

Also, if you subclass Process then make sure that instances will be picklable when the start() method is called.

Global variables:

Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that start() was called.

However, global variables which are just module level constants cause no problems.

Safe importing of main module:

Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).

For example, under Windows running the following module would fail with a RuntimeError:

from processing import Process

def foo():
    print 'hello'

p = Process(target=foo)
p.start()

Instead one should protect the "entry point" of the program by using if __name__ == '__main__': as follows:

from processing import Process, freezeSupport

def foo():
    print 'hello'

if __name__ == '__main__':
    freezeSupport()
    p = Process(target=foo)
    p.start()

(The freezeSupport() line can be omitted if the program will be run normally instead of frozen.)

This allows the newly spawned Python interpreter to safely import the module and then run the module's foo() function.

Similar restrictions apply if a pool or manager is created in the main module.

uqfoundation-multiprocess-b3457a5/pypy3.8/doc/programming-guidelines.txt000066400000000000000000000150221455552142400265300ustar00rootroot00000000000000.. include:: header.txt ======================== Programming guidelines ======================== There are certain guidelines and idioms which should be adhered to when using the `processing` package. All platforms ------------- *Avoid shared state* As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the `threading` module. *Picklability*: Ensure that the arguments to the methods of proxies are picklable. *Thread safety of proxies*: Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the 'same' proxy.) *Joining zombie processes* On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or `activeChildren()` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's `isAlive()` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. *Better to inherit than pickle/unpickle* On Windows many of types from the `processing` package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which need access to a shared resource created elsewhere can inherit it from an ancestor process. *Avoid terminating processes* Using the `terminate()` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using `terminate()` on processes which never use any shared resources. *Joining processes that use queues* Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the `cancelJoin()` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from processing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the `p.join()` line). *Explicity pass resources to child processes* On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. 
Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from processing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from processing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Windows ------- Since Windows lacks `os.fork()` it has a few extra restrictions: *More picklability*: Ensure that all arguments to `Process.__init__()` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the `target` argument on Windows --- just define a function and use that instead. Also, if you subclass `Process` then make sure that instances will be picklable when the `start()` method is called. *Global variables*: Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that `start()` was called. However, global variables which are just module level constants cause no problems. *Safe importing of main module*: Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a `RuntimeError`:: from processing import Process def foo(): print 'hello' p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using `if __name__ == '__main__':` as follows:: from processing import Process def foo(): print 'hello' if __name__ == '__main__': freezeSupport() p = Process(target=foo) p.start() (The `freezeSupport()` line can be ommitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's `foo()` function. Similar restrictions apply if a pool or manager is created in the main module. .. _Prev: connection-ref.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/proxy-objects.html000066400000000000000000000175771455552142400250360ustar00rootroot00000000000000 Proxy objects

Proxy objects

A proxy is an object which refers to a shared object which lives (presumably) in a different process. The shared object is said to be the referent of the proxy. Multiple proxy objects may have the same referent.

A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can:

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<Proxy[list] object at 0x00DFA230>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
>>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
True

Notice that applying str() to a proxy will return the representation of the referent, whereas applying repr() will return the representation of the proxy.

An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:

>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b)         # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']

Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the for statement:

>>> a = manager.dict([(i*i, i) for i in range(10)])
>>> for key in a:
...     print '<%r,%r>' % (key, a[key]),
...
<0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6>

Note

Although list and dict proxy objects are iterable, it will be much more efficient to iterate over a copy of the referent, for example

for item in some_list[:]:
    ...

and

for key in some_dict.keys():
    ...

Methods of BaseProxy

Proxy objects are instances of subclasses of BaseProxy. The only semi-public methods of BaseProxy are the following:

_callMethod(methodname, args=(), kwds={})

Call and return the result of a method of the proxy's referent.

If proxy is a proxy whose referent is obj then the expression

proxy._callMethod(methodname, args, kwds)

will evaluate the expression

getattr(obj, methodname)(*args, **kwds)         (*)

in the manager's process.

The returned value will be either a copy of the result of (*) or if the result is an unpicklable iterator then a proxy for the iterator.

If an exception is raised by (*) then it is re-raised by _callMethod(). If some other exception is raised in the manager's process then this is converted into a RemoteError exception and is raised by _callMethod().

Note in particular that an exception will be raised if methodname has not been exposed --- see the exposed argument to CreatorMethod.

_getValue()

Return a copy of the referent.

If the referent is unpicklable then this will raise an exception.

__repr__
Return a representation of the proxy object.
__str__
Return the representation of the referent.

Cleanup

A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent.

A shared object gets deleted from the manager process when there are no longer any proxies referring to it.

Examples

An example of the usage of _callMethod():

>>> l = manager.list(range(10))
>>> l._callMethod('__getslice__', (2, 7))   # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._callMethod('__iter__')               # equiv to `iter(l)`
<Proxy[iter] object at 0x00DFAFF0>
>>> l._callMethod('__getitem__', (20,))     # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range

As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:

class IteratorProxy(BaseProxy):
    def __iter__(self):
        return self
    def next(self):
        return self._callMethod('next')
uqfoundation-multiprocess-b3457a5/pypy3.8/doc/proxy-objects.txt000066400000000000000000000115571455552142400247010ustar00rootroot00000000000000.. include:: header.txt =============== Proxy objects =============== A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that the its referent can:: >>> from processing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print l [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print repr(l) >>> l[4] 16 >>> l[2:5] [4, 9, 16] >>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] True Notice that applying `str()` to a proxy will return the representation of the referent, whereas applying `repr()` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of `a` now contains referent of `b` >>> print a, b [[]] [] >>> b.append('hello') >>> print a, b [['hello']] ['hello'] Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the `for` statement:: >>> a = manager.dict([(i*i, i) for i in range(10)]) >>> for key in a: ... print '<%r,%r>' % (key, a[key]), ... <0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6> .. note:: Although `list` and `dict` proxy objects are iterable, it will be much more efficient to iterate over a *copy* of the referent, for example :: for item in some_list[:]: ... and :: for key in some_dict.keys(): ... Methods of `BaseProxy` ====================== Proxy objects are instances of subclasses of `BaseProxy`. The only semi-public methods of `BaseProxy` are the following: `_callMethod(methodname, args=(), kwds={})` Call and return the result of a method of the proxy's referent. If `proxy` is a proxy whose referent is `obj` then the expression `proxy._callMethod(methodname, args, kwds)` will evaluate the expression `getattr(obj, methodname)(*args, **kwds)` |spaces| _`(*)` in the manager's process. The returned value will be either a copy of the result of `(*)`_ or if the result is an unpicklable iterator then a proxy for the iterator. If an exception is raised by `(*)`_ then then is re-raised by `_callMethod()`. If some other exception is raised in the manager's process then this is converted into a `RemoteError` exception and is raised by `_callMethod()`. Note in particular that an exception will be raised if `methodname` has not been *exposed* --- see the `exposed` argument to `CreatorMethod `_. `_getValue()` Return a copy of the referent. If the referent is unpicklable then this will raise an exception. `__repr__` Return a representation of the proxy object. `__str__` Return the representation of the referent. Cleanup ======= A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. 
A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Examples ======== An example of the usage of `_callMethod()`:: >>> l = manager.list(range(10)) >>> l._callMethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callMethod('__iter__') # equiv to `iter(l)` >>> l._callMethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:: class IteratorProxy(BaseProxy): def __iter__(self): return self def next(self): return self._callMethod('next') .. _Prev: manager-objects.html .. _Up: processing-ref.html .. _Next: pool-objects.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/queue-objects.html000066400000000000000000000227101455552142400247620ustar00rootroot00000000000000 Queue objects

Queue objects

The queue type provided by processing is a multi-producer, multi-consumer FIFO queue modelled on the Queue.Queue class in the standard library.

Queue(maxsize=0)

Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe.

processing.Queue implements all the methods of Queue.Queue except for qsize(), task_done() and join().

empty()
Return True if the queue is empty, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
full()
Return True if the queue is full, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
put(item, block=True, timeout=None)
Put item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).
put_nowait(item), putNoWait(item)
Equivalent to put(item, False).
get(block=True, timeout=None)
Remove and return an item from the queue. If optional args block is true and timeout is None (the default), block if necessary until an item is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the Empty exception (timeout is ignored in that case).
get_nowait(), getNoWait()
Equivalent to get(False).

processing.Queue has a few additional methods not found in Queue.Queue which are usually unnecessary:

putMany(iterable)
If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So q.putMany(X) is a faster alternative to for x in X: q.put(x). Raises an error if the queue has finite size.
close()
Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected.
joinThread()

This joins the background thread and can only be used after close() has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe.

By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call cancelJoin() to prevent this behaviour.

cancelJoin()
Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue.

Empty and Full

processing uses the usual Queue.Empty and Queue.Full exceptions to signal a timeout. They are not available in the processing namespace so you need to import them from Queue.
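
For example, importing Empty from the standard Queue module as just described:

from Queue import Empty
from processing import Process, Queue

def worker(q):
    q.put('result')

if __name__ == '__main__':
    q = Queue()
    Process(target=worker, args=(q,)).start()
    try:
        print q.get(timeout=1)          # prints 'result'
    except Empty:
        print 'nothing arrived within one second'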

Warning

If a process is killed using the terminate() method or os.kill() while it is trying to use a Queue then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when they try to use the queue later on.

Warning

As mentioned above, if a child process has put items on a queue (and it has not used cancelJoin()) then that process will not terminate until all buffered items have been flushed to the pipe.

This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children.

Note that a queue created using a manager does not have this issue. See Programming Guidelines.

uqfoundation-multiprocess-b3457a5/pypy3.8/doc/queue-objects.txt000066400000000000000000000121211455552142400246300ustar00rootroot00000000000000.. include:: header.txt =============== Queue objects =============== The queue type provided by `processing` is a multi-producer, multi-consumer FIFO queue modelled on the `Queue.Queue` class in the standard library. `Queue(maxsize=0)` Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. `Queue.Queue` implements all the methods of `Queue.Queue` except for `qsize()`, `task_done()` and `join()`. `empty()` Return `True` if the queue is empty, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `full()` Return `True` if the queue is full, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `put(item, block=True, timeout=None)` Put item into the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Full` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the `Full` exception (`timeout` is ignored in that case). `put_nowait(item)`, `putNoWait(item)` Equivalent to `put(item, False)`. `get(block=True, timeout=None)` Remove and return an item from the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Empty` exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the `Empty` exception (`timeout` is ignored in that case). `get_nowait()`, `getNoWait()` Equivalent to `get(False)`. `processing.Queue` has a few additional methods not found in `Queue.Queue` which are usually unnecessary: `putMany(iterable)` If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So `q.putMany(X)` is a faster alternative to `for x in X: q.put(x)`. Raises an error if the queue has finite size. `close()` Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. `joinThread()` This joins the background thread and can only be used after `close()` has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call `cancelJoin()` to prevent this behaviour. `cancelJoin()` Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue. .. admonition:: `Empty` and `Full` `processing` uses the usual `Queue.Empty` and `Queue.Full` exceptions to signal a timeout. They are not available in the `processing` namespace so you need to import them from `Queue`. .. 
warning:: If a process is killed using the `terminate()` method or `os.kill()` while it is trying to use a `Queue` then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used `cancelJoin()`) then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all it non-daemonic children. Note that a queue created using a manager does not have this issue. See `Programming Guidelines `_. .. _Prev: process-objects.html .. _Up: processing-ref.html .. _Next: connection-objects.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/sharedctypes.html000066400000000000000000000241571455552142400247140ustar00rootroot00000000000000 Shared ctypes objects
Prev         Up         Next

Shared ctypes objects

The processing.sharedctypes module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the ctypes package.)

The functions in the module are

RawArray(typecode_or_type, size_or_initializer)

Returns a ctypes array allocated from shared memory.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock.

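For illustration, here is a minimal sketch of the two forms of size_or_initializer. (This sketch is not part of the original page; it is written against the equivalent functions in the modern multiprocess/multiprocessing package and uses the Python 3 print function, unlike the examples further down.)

from multiprocess.sharedctypes import RawArray
from ctypes import c_int

# length given as an integer -- the array starts out zeroed
zeroed = RawArray(c_int, 5)
print(list(zeroed))              # [0, 0, 0, 0, 0]

# length taken from the initializing sequence
filled = RawArray('i', [1, 2, 3])
print(list(filled))              # [1, 2, 3]
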
RawValue(typecode_or_type, *args)

Returns a ctypes object allocated from shared memory.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see documentation for ctypes.

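A corresponding sketch for RawValue, again written (as an illustration with made-up values) against the modern multiprocess/multiprocessing equivalents:

from multiprocess.sharedctypes import RawValue
from ctypes import c_double

a = RawValue(c_double, 2.5)      # a ctypes type plus constructor arguments
b = RawValue('i', 7)             # a one character typecode from the array module
a.value += 1.0
print(a.value, b.value)          # 3.5 7
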
Array(typecode_or_type, size_or_initializer, **, lock=True)

The same as RawArray() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

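The three lock modes can be sketched as follows (an illustration using the modern multiprocess/multiprocessing API, where the wrapper methods are named get_lock() and get_obj()):

from multiprocess import Lock
from multiprocess.sharedctypes import Array

shared = Array('i', [0, 0, 0])           # lock=True (default): a fresh lock guards the array
with shared.get_lock():
    shared[0] = 99

own_lock = Lock()
guarded = Array('d', 4, lock=own_lock)   # reuse an existing Lock/RLock
raw_like = Array('h', 4, lock=False)     # no locking -- behaves like RawArray
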
Value(typecode_or_type, *args, **, lock=True)

The same as RawValue() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes object.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

copy(obj)
Returns a ctypes object allocated from shared memory which is a copy of the ctypes object obj.
synchronized(obj, lock=None)

Returns a process-safe wrapper object for a ctypes object which uses lock to synchronize access. If lock is None then a processing.RLock object is created automatically.

A synchronized wrapper will have two methods in addition to those of the object it wraps: getobj() returns the wrapped object and getlock() returns the lock object used for synchronization.

Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object.

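As a sketch of synchronized() -- using the modern multiprocess/multiprocessing API, where the two wrapper methods described above are spelled get_obj() and get_lock():

from multiprocess.sharedctypes import RawValue, synchronized

raw = RawValue('i', 0)
safe = synchronized(raw)         # an RLock is created automatically
with safe.get_lock():            # the lock used for synchronization
    safe.value += 1
assert safe.get_obj() is raw     # the wrapped ctypes object
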
Equivalences

The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table MyStruct is some subclass of ctypes.Structure.)

ctypes sharedctypes using type sharedctypes using typecode
c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4)
MyStruct(4, 6) RawValue(MyStruct, 4, 6)  
(c_short * 7)() RawArray(c_short, 7) RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))

Example

Below is an example where a number of ctypes objects are modified by a child process

from processing import Process, Lock
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', 'hello world', lock=lock)
    A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print n.value
    print x.value
    print s.value
    print [(p.x, p.y) for p in A]

The results printed are

49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]

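For readers using the present-day multiprocess (or multiprocessing) package rather than processing, a rough modern equivalent of the example above is sketched below; note the bytes literal for the 'c' array and the Python 3 print function:

from multiprocess import Process, Lock
from multiprocess.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', b'hello world', lock=lock)
    A = Array(Point, [(1.875, -6.25), (-5.75, 2.0), (2.375, 9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print(n.value)
    print(x.value)
    print(s.value)
    print([(p.x, p.y) for p in A])
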
Avoid sharing pointers

Although it is possible to store a pointer in shared memory, remember that it will refer to a location in the address space of a specific process. The pointer is therefore quite likely to be invalid in the context of a second process, and trying to dereference it from that second process may cause a crash.

uqfoundation-multiprocess-b3457a5/pypy3.8/doc/sharedctypes.txt000066400000000000000000000143071455552142400245630ustar00rootroot00000000000000.. include:: header.txt ======================== Shared ctypes objects ======================== The `processing.sharedctypes` module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the `ctypes` package.) The functions in the module are `RawArray(typecode_or_type, size_or_initializer)` Returns a ctypes array allocated from shared memory. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock. `RawValue(typecode_or_type, *args)` Returns a ctypes object allocated from shared memory. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see documentation for `ctypes`. `Array(typecode_or_type, size_or_initializer, **, lock=True)` The same as `RawArray()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Value(typecode_or_type, *args, **, lock=True)` The same as `RawValue()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `copy(obj)` Returns a ctypes object allocated from shared memory which is a copy of the ctypes object `obj`. `synchronized(obj, lock=None)` Returns a process-safe wrapper object for a ctypes object which uses `lock` to synchronize access. If `lock` is `None` then a `processing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: `getobj()` returns the wrapped object and `getlock()` returns the lock object used for synchronization. 
Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. Equivalences ============ The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table `MyStruct` is some subclass of `ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Example ======= Below is an example where a number of ctypes objects are modified by a child process :: from processing import Process, Lock from processing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for p in A: p.x **= 2 p.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(ctypes.c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print n.value print x.value print s.value print [(p.x, p.y) for p in A] The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. admonition:: Avoid sharing pointers Although it is posible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. _Prev: pool-objects.html .. _Up: processing-ref.html .. _Next: connection-ref.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/tests.html000066400000000000000000000060761455552142400233600ustar00rootroot00000000000000 Tests and Examples
Prev         Up         Next

Tests and Examples

processing contains a test sub-package which provides unit tests for the package. You can do a test run by doing

python -m processing.tests

on Python 2.5 or

python -c "from processing.tests import main; main()"

on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager.

The example sub-package contains the following modules:

ex_newtype.py
Demonstration of how to create and use customized managers and proxies.
ex_pool.py
Test of the Pool class which represents a process pool.
ex_synchronize.py
Test of synchronization types like locks, conditions and queues.
ex_workers.py
A test showing how to use queues to feed tasks to a collection of worker processes and collect the results.
ex_webserver.py
An example of how a pool of worker processes can each run a SimpleHTTPServer.HttpServer instance while sharing a single listening socket.
benchmarks.py
Some simple benchmarks comparing processing with threading.
uqfoundation-multiprocess-b3457a5/pypy3.8/doc/tests.txt000066400000000000000000000027331455552142400232270ustar00rootroot00000000000000.. include:: header.txt Tests and Examples ================== `processing` contains a `test` sub-package which contains unit tests for the package. You can do a test run by doing :: python -m processing.tests on Python 2.5 or :: python -c "from processing.tests import main; main()" on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager. The `example` sub-package contains the following modules: `ex_newtype.py <../examples/ex_newtype.py>`_ Demonstration of how to create and use customized managers and proxies. `ex_pool.py <../examples/ex_pool.py>`_ Test of the `Pool` class which represents a process pool. `ex_synchronize.py <../examples/ex_synchronize.py>`_ Test of synchronization types like locks, conditions and queues. `ex_workers.py <../examples/ex_workers.py>`_ A test showing how to use queues to feed tasks to a collection of worker process and collect the results. `ex_webserver.py <../examples/ex_webserver.py>`_ An example of how a pool of worker processes can each run a `SimpleHTTPServer.HttpServer` instance while sharing a single listening socket. `benchmarks.py <../examples/benchmarks.py>`_ Some simple benchmarks comparing `processing` with `threading`. .. _Prev: programming-guidelines.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/pypy3.8/doc/version.txt000066400000000000000000000000341455552142400235420ustar00rootroot00000000000000.. |version| replace:: 0.52 uqfoundation-multiprocess-b3457a5/pypy3.8/examples/000077500000000000000000000000001455552142400223705ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/examples/FAILS.txt000066400000000000000000000101261455552142400237670ustar00rootroot00000000000000=== 3.1 --- $ python ex_newtype.py Traceback (most recent call last): File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hashlib.py", line 104, in import _hashlib ImportError: dlopen(/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/lib-dynload/_hashlib.so, 2): Library not loaded: /opt/local/lib/libssl.1.0.0.dylib Referenced from: /opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/lib-dynload/_hashlib.so Reason: image not found During handling of the above exception, another exception occurred: Traceback (most recent call last): File "ex_newtype.py", line 77, in test() File "ex_newtype.py", line 52, in test f1 = manager.Foo1() File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/managers.py", line 669, in temp token, exp = self._create(typeid, *args, **kwds) File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/managers.py", line 567, in _create conn = self._Client(self._address, authkey=self._authkey) File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/connection.py", line 178, in Client answer_challenge(c, authkey) File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/connection.py", line 418, in answer_challenge digest = hmac.new(authkey, message).digest() File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hmac.py", line 140, in new return HMAC(key, msg, digestmod) File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hmac.py", line 46, in __init__ import hashlib File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hashlib.py", line 135, in md5 = 
__get_builtin_constructor('md5') File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hashlib.py", line 62, in __get_builtin_constructor import _md5 ImportError: No module named _md5 $ python ex_pool.py SyntaxError: can not delete variable 'pool' referenced in nested scope === 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8 (with 'fork', 'spawn'+recurse=True) --- $ python ex_pool.py Testing garbage collection: Traceback (most recent call last): File "ex_pool.py", line 295, in test() File "ex_pool.py", line 288, in test assert not worker.is_alive() AssertionError === 3.8 (with 'spawn'+recurse=False) --- $ python ex_pool.py Ordered results using pool.apply_async(): multiprocess.pool.RemoteTraceback: """ Traceback (most recent call last): File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/pool.py", line 125, in worker result = (True, func(*args, **kwds)) File "ex_pool.py", line 16, in calculate result = func(*args) File "ex_pool.py", line 24, in mul time.sleep(0.5*random.random()) NameError: name 'time' is not defined """ The above exception was the direct cause of the following exception: Traceback (most recent call last): File "ex_pool.py", line 295, in test() File "ex_pool.py", line 68, in test print('\t', r.get()) File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/pool.py", line 768, in get raise self._value NameError: name 'time' is not defined $ python ex_synchronize.py 10 Process Process-1: Traceback (most recent call last): File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 313, in _bootstrap self.run() File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 108, in run self._target(*self._args, **self._kwargs) File "ex_synchronize.py", line 17, in value_func random.seed() NameError: name 'random' is not defined $ python ex_workers.py Unordered results: Process Process-1: Traceback (most recent call last): File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 313, in _bootstrap self.run() File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 108, in run self._target(*self._args, **self._kwargs) File "ex_workers.py", line 23, in worker result = calculate(func, args) NameError: name 'calculate' is not defined uqfoundation-multiprocess-b3457a5/pypy3.8/examples/__init__.py000066400000000000000000000000001455552142400244670ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/examples/benchmarks.py000066400000000000000000000131321455552142400250570ustar00rootroot00000000000000# # Simple benchmarks for the processing package # import time, sys, multiprocess as processing, threading, queue as Queue, gc processing.freezeSupport = processing.freeze_support if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in range(iterations): q.put(a) # q.putMany((a for i in range(iterations)) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print(iterations, 'objects passed through the queue in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 
cond.acquire() cond.notify() cond.release() for i in range(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = processing.Pipe() cond = processing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = processing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print(iterations, 'objects passed through connection in',elapsed,'seconds') print('average number/sec:', iterations/elapsed) #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): a = seq[5] elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): l.acquire() l.release() elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in range(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in range(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print(iterations * 2, 'waits in', elapsed, 'seconds') print('average number/sec:', iterations * 2 / elapsed) #### def test(): manager = processing.Manager() gc.disable() print('\n\t######## testing Queue.Queue\n') test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print('\n\t######## testing processing.Queue\n') test_queuespeed(processing.Process, processing.Queue(), processing.Condition()) print('\n\t######## testing Queue managed by server process\n') test_queuespeed(processing.Process, manager.Queue(), manager.Condition()) print('\n\t######## testing processing.Pipe\n') test_pipespeed() print print('\n\t######## testing list\n') test_seqspeed(range(10)) print('\n\t######## testing list managed by server process\n') test_seqspeed(manager.list(range(10))) print('\n\t######## testing Array("i", ..., lock=False)\n') test_seqspeed(processing.Array('i', range(10), lock=False)) print('\n\t######## testing Array("i", ..., lock=True)\n') test_seqspeed(processing.Array('i', range(10), lock=True)) print() print('\n\t######## testing threading.Lock\n') test_lockspeed(threading.Lock()) print('\n\t######## testing threading.RLock\n') test_lockspeed(threading.RLock()) print('\n\t######## testing processing.Lock\n') test_lockspeed(processing.Lock()) print('\n\t######## testing processing.RLock\n') test_lockspeed(processing.RLock()) print('\n\t######## testing lock managed by server process\n') test_lockspeed(manager.Lock()) print('\n\t######## testing rlock managed by server process\n') test_lockspeed(manager.RLock()) print() print('\n\t######## testing threading.Condition\n') test_conditionspeed(threading.Thread, threading.Condition()) print('\n\t######## testing processing.Condition\n') test_conditionspeed(processing.Process, processing.Condition()) print('\n\t######## testing condition managed by a server process\n') test_conditionspeed(processing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': 
processing.freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.8/examples/ex_newtype.py000066400000000000000000000030731455552142400251340ustar00rootroot00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. # from multiprocess import freeze_support as freezeSupport from multiprocess.managers import BaseManager, IteratorProxy as BaseProxy ## class Foo(object): def f(self): print('you called Foo.f()') def g(self): print('you called Foo.g()') def _h(self): print('you called Foo._h()') # A simple generator function def baz(): for i in range(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): def __iter__(self): return self def __next__(self): return self._callmethod('__next__') ## class MyManager(BaseManager): pass # register the Foo class; make all public methods accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make only `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) ## def test(): manager = MyManager() manager.start() print('-' * 20) f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') print('-' * 20) f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') print('-' * 20) it = manager.baz() for i in it: print('<%d>' % i, end=' ') print() ## if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.8/examples/ex_pool.py000066400000000000000000000155061455552142400244160ustar00rootroot00000000000000# # A test of `processing.Pool` class # from multiprocess import Pool, TimeoutError from multiprocess import cpu_count as cpuCount, current_process as currentProcess, freeze_support as freezeSupport, active_children as activeChildren import time, random, sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print('cpuCount() = %d\n' % cpuCount()) # # Create pool # PROCESSES = 4 print('Creating pool with %d processes\n' % PROCESSES) pool = Pool(PROCESSES) # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print('Ordered results using pool.apply_async():') for r in results: print('\t', r.get()) print() print('Ordered results using pool.imap():') for x in imap_it: print('\t', x) print() print('Unordered results using pool.imap_unordered():') for x in imap_unordered_it: print('\t', x) print() print('Ordered results using pool.map() --- will block till complete:') for x in pool.map(calculatestar, TASKS): print('\t', x) print() # # Simple benchmarks # N = 100000 print('def pow3(x): return x**3') t = time.time() A = list(map(pow3, range(N))) print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() B = pool.map(pow3, range(N)) print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() C = list(pool.imap(pow3, 
range(N), chunksize=N//8)) print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() L = [None] * 1000000 print('def noop(x): pass') print('L = [None] * 1000000') t = time.time() A = list(map(noop, L)) print('\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() B = pool.map(noop, L) print('\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() del A, B, C, L # # Test error handling # print('Testing error handling:') try: print(pool.apply(f, (5,))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.apply()') else: raise AssertionError('expected ZeroDivisionError') try: print(pool.map(f, range(10))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.map()') else: raise AssertionError('expected ZeroDivisionError') try: print(list(pool.imap(f, range(10)))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from list(pool.imap())') else: raise AssertionError('expected ZeroDivisionError') it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError('expected ZeroDivisionError') assert i == 9 print('\tGot ZeroDivisionError as expected from IMapIterator.next()') print() # # Testing timeouts # print('Testing ApplyResult.get() with timeout:', end='') res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except TimeoutError: sys.stdout.write('.') print() print() print('Testing IMapIterator.next() with timeout:', end='') it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except TimeoutError: sys.stdout.write('.') print() print() # # Testing callback # print('Testing callback:') A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print('\tcallbacks succeeded\n') else: print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print('Testing close():') for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print('\tclose() succeeded\n') # # Check terminate() method # print('Testing terminate():') pool = Pool(2) ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print('\tterminate() succeeded\n') # # Check garbage collection # print('Testing garbage collection:') pool = Pool(2) processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] del results, pool time.sleep(0.2) for worker in processes: assert not worker.is_alive() print('\tgarbage collection succeeded\n') if __name__ == '__main__': freezeSupport() test() 
uqfoundation-multiprocess-b3457a5/pypy3.8/examples/ex_synchronize.py000066400000000000000000000144041455552142400260140ustar00rootroot00000000000000# # A test file for the `processing` package # import time, sys, random from queue import Empty import multiprocess as processing # may get overwritten processing.currentProcess = processing.current_process processing.freezeSupport = processing.freeze_support processing.activeChildren = processing.active_children #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print('\n\t\t\t' + str(processing.currentProcess()) + ' has finished') running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = processing.Value('i', TASKS) mutex = processing.Lock() for i in range(TASKS): processing.Process(target=value_func, args=(running, mutex)).start() while running.value > 0: time.sleep(0.08) mutex.acquire() print(running.value, end=' ') sys.stdout.flush() mutex.release() print() print('No more running processes') #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = processing.Queue() p = processing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print(o, end=' ') sys.stdout.flush() except Empty: print('TIMEOUT') print() #### TEST_CONDITION def condition_func(cond): cond.acquire() print('\t' + str(cond)) time.sleep(2) print('\tchild is notifying') print('\t' + str(cond)) cond.notify() cond.release() def test_condition(): cond = processing.Condition() p = processing.Process(target=condition_func, args=(cond,)) print(cond) cond.acquire() print(cond) cond.acquire() print(cond) p.start() print('main is waiting') cond.wait() print('main has woken up') print(cond) cond.release() print(cond) cond.release() p.join() print(cond) #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print(running.value, 'tasks are running') mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print('%s has finished' % processing.currentProcess()) mutex.release() sema.release() def test_semaphore(): sema = processing.Semaphore(3) mutex = processing.RLock() running = processing.Value('i', 0) processes = [ processing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print('\tchild sleeping') time.sleep(5.5) print('\n\tchild terminating') def test_join_timeout(): p = processing.Process(target=join_timeout_func) p.start() print('waiting for process to finish') while 1: p.join(timeout=1) if not p.is_alive(): break print('.', end=' ') sys.stdout.flush() #### TEST_EVENT def event_func(event): print('\t%r is waiting' % processing.currentProcess()) event.wait() print('\t%r has woken up' % processing.currentProcess()) def test_event(): event = processing.Event() processes = [processing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print('main is sleeping') time.sleep(2) print('main is setting event') event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert 
list(a) == sa print('Tests passed') def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [processing.Value(id, v) for id, v in values] shared_arrays = [processing.Array(id, a) for id, a in arrays] p = processing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print('\n\t######## %s\n' % func.__name__) func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print(info) raise ValueError('there should be no positive refcounts left') if __name__ == '__main__': processing.freezeSupport() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print(' Using processes '.center(79, '-')) namespace = processing elif sys.argv[1] == 'manager': print(' Using processes and a manager '.center(79, '-')) namespace = processing.Manager() namespace.Process = processing.Process namespace.currentProcess = processing.currentProcess namespace.activeChildren = processing.activeChildren elif sys.argv[1] == 'threads': print(' Using threads '.center(79, '-')) import processing.dummy as namespace else: print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]) raise SystemExit(2) test(namespace) uqfoundation-multiprocess-b3457a5/pypy3.8/examples/ex_webserver.py000066400000000000000000000041001455552142400254350ustar00rootroot00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `processing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. 
# import os import sys from multiprocess import Process, current_process as currentProcess, freeze_support as freezeSupport from http.server import HTTPServer from http.server import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocess.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (currentProcess()._name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print('Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)) print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']) os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.8/examples/ex_workers.py000066400000000000000000000042241455552142400251340ustar00rootroot00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). 
# import time import random from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport from multiprocess import Queue # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks list(map(task_queue.put, TASKS1)) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print('Unordered results:') for i in range(len(TASKS1)): print('\t', done_queue.get()) # Add more tasks using `put()` instead of `putMany()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print('\t', done_queue.get()) # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.8/index.html000066400000000000000000000117511455552142400225540ustar00rootroot00000000000000 Python processing

Python processing

Author: R Oudkerk
Contact: roudkerk at users.berlios.de
Url: http://developer.berlios.de/projects/pyprocessing
Version: 0.52
Licence: BSD Licence

processing is a package for the Python language which supports the spawning of processes using the API of the standard library's threading module. It runs on both Unix and Windows.

Features:

  • Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
  • Objects can be shared between processes using a server process or (for simple data) shared memory.
  • Equivalents of all the synchronization primitives in threading are available.
  • A Pool class makes it easy to submit tasks to a pool of worker processes.

Examples

The processing.Process class follows the API of threading.Thread. For example

from processing import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=[q])
    p.start()
    print q.get()
    p.join()

Synchronization primitives like locks, semaphores and conditions are available, for example

>>> from processing import Condition
>>> c = Condition()
>>> print c
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print c
<Condition(<RLock(MainProcess, 1)>), 0>

One can also use a manager to create shared objects either in shared memory or in a server process, for example

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print l
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print repr(l)
<Proxy[list] object at 0x00E1B3B0>

Tasks can be offloaded to a pool of worker processes in various ways, for example

>>> from processing import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.mapAsync(f, range(10))
>>> print result.get(timeout=1)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
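
In the present-day multiprocess (and multiprocessing) package the camelCase names shown above are spelled with underscores (for example map_async instead of mapAsync). A rough modern sketch of the last example:

from multiprocess import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    with Pool(4) as p:
        result = p.map_async(f, range(10))
        print(result.get(timeout=1))    # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
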
BerliOS Developer Logo
uqfoundation-multiprocess-b3457a5/pypy3.8/module/000077500000000000000000000000001455552142400220375ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/000077500000000000000000000000001455552142400247275ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/__init__.py000066400000000000000000000000001455552142400270260ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/interp_memory.py000066400000000000000000000010051455552142400301660ustar00rootroot00000000000000from rpython.rtyper.lltypesystem import rffi from pypy.interpreter.error import oefmt from pypy.module.mmap.interp_mmap import W_MMap def address_of_buffer(space, w_obj): if space.config.objspace.usemodules.mmap: mmap = space.interp_w(W_MMap, w_obj) address = rffi.cast(rffi.SIZE_T, mmap.mmap.data) return space.newtuple2(space.newint(address), space.newint(mmap.mmap.size)) else: raise oefmt(space.w_TypeError, "cannot get address of buffer") uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/interp_semaphore.py000066400000000000000000000522731455552142400306560ustar00rootroot00000000000000import errno import os import sys import time from rpython.rlib import jit, rgc, rthread from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform as platform from rpython.translator.tool.cbuild import ExternalCompilationInfo from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import GetSetProperty, TypeDef RECURSIVE_MUTEX, SEMAPHORE = range(2) sys_platform = sys.platform if sys.platform == 'win32': from rpython.rlib import rwin32 from pypy.module._multiprocessing.interp_win32_py3 import ( _GetTickCount, handle_w) SEM_VALUE_MAX = int(2**31-1) # max rffi.LONG _CreateSemaphore = rwin32.winexternal( 'CreateSemaphoreA', [rffi.VOIDP, rffi.LONG, rffi.LONG, rwin32.LPCSTR], rwin32.HANDLE, save_err=rffi.RFFI_FULL_LASTERROR) _CloseHandle_no_errno = rwin32.winexternal('CloseHandle', [rwin32.HANDLE], rwin32.BOOL, releasegil=False) _ReleaseSemaphore = rwin32.winexternal( 'ReleaseSemaphore', [rwin32.HANDLE, rffi.LONG, rffi.LONGP], rwin32.BOOL, save_err=rffi.RFFI_SAVE_LASTERROR, releasegil=False) def sem_unlink(name): return None else: from rpython.rlib import rposix if sys.platform == 'darwin': libraries = [] else: libraries = ['rt'] eci = ExternalCompilationInfo( includes = ['sys/time.h', 'limits.h', 'semaphore.h', ], libraries = libraries, ) class CConfig: _compilation_info_ = eci TIMEVAL = platform.Struct('struct timeval', [('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)]) TIMESPEC = platform.Struct('struct timespec', [('tv_sec', rffi.TIME_T), ('tv_nsec', rffi.LONG)]) SEM_FAILED = platform.ConstantInteger('SEM_FAILED') SEM_VALUE_MAX = platform.DefinedConstantInteger('SEM_VALUE_MAX') SEM_TIMED_WAIT = platform.Has('sem_timedwait') SEM_T_SIZE = platform.SizeOf('sem_t') config = platform.configure(CConfig) TIMEVAL = config['TIMEVAL'] TIMESPEC = config['TIMESPEC'] TIMEVALP = rffi.CArrayPtr(TIMEVAL) TIMESPECP = rffi.CArrayPtr(TIMESPEC) SEM_T = rffi.COpaquePtr('sem_t', compilation_info=eci) # rffi.cast(SEM_T, config['SEM_FAILED']) SEM_FAILED = config['SEM_FAILED'] SEM_VALUE_MAX = config['SEM_VALUE_MAX'] if SEM_VALUE_MAX is None: # on Hurd SEM_VALUE_MAX = sys.maxint SEM_TIMED_WAIT = config['SEM_TIMED_WAIT'] 
SEM_T_SIZE = config['SEM_T_SIZE'] if sys.platform == 'darwin': HAVE_BROKEN_SEM_GETVALUE = True else: HAVE_BROKEN_SEM_GETVALUE = False def external(name, args, result, **kwargs): return rffi.llexternal(name, args, result, compilation_info=eci, **kwargs) _sem_open = external('sem_open', [rffi.CCHARP, rffi.INT, rffi.INT, rffi.UINT], SEM_T, save_err=rffi.RFFI_SAVE_ERRNO) # sem_close is releasegil=False to be able to use it in the __del__ _sem_close_no_errno = external('sem_close', [SEM_T], rffi.INT, releasegil=False) _sem_close = external('sem_close', [SEM_T], rffi.INT, releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _sem_wait = external('sem_wait', [SEM_T], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _sem_trywait = external('sem_trywait', [SEM_T], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _sem_post = external('sem_post', [SEM_T], rffi.INT, releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) _sem_getvalue = external('sem_getvalue', [SEM_T, rffi.INTP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _gettimeofday = external('gettimeofday', [TIMEVALP, rffi.VOIDP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _select = external('select', [rffi.INT, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP, TIMEVALP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) @jit.dont_look_inside def sem_open(name, oflag, mode, value): res = _sem_open(name, oflag, mode, value) if res == rffi.cast(SEM_T, SEM_FAILED): raise OSError(rposix.get_saved_errno(), "sem_open failed") return res def sem_close(handle): res = _sem_close(handle) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_close failed") def sem_unlink(name): res = _sem_unlink(name) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_unlink failed") def sem_wait(sem): res = _sem_wait(sem) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_wait failed") def sem_trywait(sem): res = _sem_trywait(sem) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_trywait failed") def sem_timedwait(sem, deadline): res = _sem_timedwait(sem, deadline) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_timedwait failed") def _sem_timedwait_save(sem, deadline): delay = 0 void = lltype.nullptr(rffi.VOIDP.TO) with lltype.scoped_alloc(TIMEVALP.TO, 1) as tvdeadline: while True: # poll if _sem_trywait(sem) == 0: return 0 elif rposix.get_saved_errno() != errno.EAGAIN: return -1 now = gettimeofday() c_tv_sec = rffi.getintfield(deadline[0], 'c_tv_sec') c_tv_nsec = rffi.getintfield(deadline[0], 'c_tv_nsec') if (c_tv_sec < now[0] or (c_tv_sec == now[0] and c_tv_nsec <= now[1])): rposix.set_saved_errno(errno.ETIMEDOUT) return -1 # calculate how much time is left difference = ((c_tv_sec - now[0]) * 1000000 + (c_tv_nsec - now[1])) # check delay not too long -- maximum is 20 msecs if delay > 20000: delay = 20000 if delay > difference: delay = difference delay += 1000 # sleep rffi.setintfield(tvdeadline[0], 'c_tv_sec', delay / 1000000) rffi.setintfield(tvdeadline[0], 'c_tv_usec', delay % 1000000) if _select(0, void, void, void, tvdeadline) < 0: return -1 if SEM_TIMED_WAIT: _sem_timedwait = external('sem_timedwait', [SEM_T, TIMESPECP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) else: _sem_timedwait = _sem_timedwait_save def sem_post(sem): res = _sem_post(sem) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_post failed") def sem_getvalue(sem): sval_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: res = _sem_getvalue(sem, sval_ptr) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_getvalue 
failed") return rffi.cast(lltype.Signed, sval_ptr[0]) finally: lltype.free(sval_ptr, flavor='raw') def gettimeofday(): now = lltype.malloc(TIMEVALP.TO, 1, flavor='raw') try: res = _gettimeofday(now, None) if res < 0: raise OSError(rposix.get_saved_errno(), "gettimeofday failed") return (rffi.getintfield(now[0], 'c_tv_sec'), rffi.getintfield(now[0], 'c_tv_usec')) finally: lltype.free(now, flavor='raw') def handle_w(space, w_handle): return rffi.cast(SEM_T, space.int_w(w_handle)) # utilized by POSIX and win32 def semaphore_unlink(space, w_name): name = space.text_w(w_name) try: sem_unlink(name) except OSError as e: raise wrap_oserror(space, e) class CounterState: def __init__(self, space): self.counter = 0 def _cleanup_(self): self.counter = 0 def getCount(self): value = self.counter self.counter += 1 return value # These functions may raise bare OSError or WindowsError, # don't forget to wrap them into OperationError if sys.platform == 'win32': def create_semaphore(space, name, val, max): rwin32.SetLastError_saved(0) handle = _CreateSemaphore(rffi.NULL, val, max, rffi.NULL) # On Windows we should fail on ERROR_ALREADY_EXISTS err = rwin32.GetLastError_saved() if err != 0: raise WindowsError(err, "CreateSemaphore") return handle def delete_semaphore(handle): _CloseHandle_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: full_msecs = 0 elif space.is_none(w_timeout): full_msecs = rwin32.INFINITE else: timeout = space.float_w(w_timeout) timeout *= 1000.0 if timeout < 0.0: timeout = 0.0 elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise oefmt(space.w_OverflowError, "timeout is too large") full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) if res != rwin32.WAIT_TIMEOUT: self.last_tid = rthread.get_ident() self.count += 1 return True msecs = full_msecs start = _GetTickCount() while True: from pypy.module.time.interp_time import State interrupt_event = space.fromcache(State).get_interrupt_event() handles = [self.handle, interrupt_event] # do the wait rwin32.ResetEvent(interrupt_event) res = rwin32.WaitForMultipleObjects(handles, timeout=msecs) if res != rwin32.WAIT_OBJECT_0 + 1: break # got SIGINT so give signal handler a chance to run time.sleep(0.001) # if this is main thread let KeyboardInterrupt be raised _check_signals(space) # recalculate timeout if msecs != rwin32.INFINITE: ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: self.last_tid = rthread.get_ident() self.count += 1 return True return False def semlock_release(self, space): if not _ReleaseSemaphore(self.handle, 1, lltype.nullptr(rffi.LONGP.TO)): err = rwin32.GetLastError_saved() if err == 0x0000012a: # ERROR_TOO_MANY_POSTS raise oefmt(space.w_ValueError, "semaphore or lock released too many times") else: raise WindowsError(err, "ReleaseSemaphore") def semlock_getvalue(self, space): if rwin32.WaitForSingleObject(self.handle, 0) == rwin32.WAIT_TIMEOUT: return 0 previous_ptr = lltype.malloc(rffi.LONGP.TO, 1, flavor='raw') try: if not _ReleaseSemaphore(self.handle, 1, previous_ptr): raise rwin32.lastSavedWindowsError("ReleaseSemaphore") return intmask(previous_ptr[0]) + 1 finally: lltype.free(previous_ptr, flavor='raw') def semlock_iszero(self, space): return semlock_getvalue(self, space) == 0 else: def create_semaphore(space, name, val, max): sem = sem_open(name, os.O_CREAT | os.O_EXCL, 0600, val) 
rgc.add_memory_pressure(SEM_T_SIZE) return sem def reopen_semaphore(name): sem = sem_open(name, 0, 0600, 0) rgc.add_memory_pressure(SEM_T_SIZE) return sem def delete_semaphore(handle): _sem_close_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: deadline = lltype.nullptr(TIMESPECP.TO) elif space.is_none(w_timeout): deadline = lltype.nullptr(TIMESPECP.TO) else: timeout = space.float_w(w_timeout) sec = int(timeout) nsec = int(1e9 * (timeout - sec) + 0.5) now_sec, now_usec = gettimeofday() deadline = lltype.malloc(TIMESPECP.TO, 1, flavor='raw') rffi.setintfield(deadline[0], 'c_tv_sec', now_sec + sec) rffi.setintfield(deadline[0], 'c_tv_nsec', now_usec * 1000 + nsec) val = (rffi.getintfield(deadline[0], 'c_tv_sec') + rffi.getintfield(deadline[0], 'c_tv_nsec') / 1000000000) rffi.setintfield(deadline[0], 'c_tv_sec', val) val = rffi.getintfield(deadline[0], 'c_tv_nsec') % 1000000000 rffi.setintfield(deadline[0], 'c_tv_nsec', val) try: while True: try: if not block: sem_trywait(self.handle) elif not deadline: sem_wait(self.handle) else: sem_timedwait(self.handle, deadline) except OSError as e: if e.errno == errno.EINTR: # again _check_signals(space) continue elif e.errno in (errno.EAGAIN, errno.ETIMEDOUT): return False raise _check_signals(space) self.last_tid = rthread.get_ident() self.count += 1 return True finally: if deadline: lltype.free(deadline, flavor='raw') def semlock_release(self, space): if self.kind == RECURSIVE_MUTEX: sem_post(self.handle) return if HAVE_BROKEN_SEM_GETVALUE: # We will only check properly the maxvalue == 1 case if self.maxvalue == 1: # make sure that already locked try: sem_trywait(self.handle) except OSError as e: if e.errno != errno.EAGAIN: raise # it is already locked as expected else: # it was not locked so undo wait and raise sem_post(self.handle) raise oefmt(space.w_ValueError, "semaphore or lock released too many times") else: # This check is not an absolute guarantee that the semaphore does # not rise above maxvalue. 
if sem_getvalue(self.handle) >= self.maxvalue: raise oefmt(space.w_ValueError, "semaphore or lock released too many times") sem_post(self.handle) def semlock_getvalue(self, space): if HAVE_BROKEN_SEM_GETVALUE: raise oefmt(space.w_NotImplementedError, "sem_getvalue is not implemented on this system") else: val = sem_getvalue(self.handle) # some posix implementations use negative numbers to indicate # the number of waiting threads if val < 0: val = 0 return val def semlock_iszero(self, space): if HAVE_BROKEN_SEM_GETVALUE: try: sem_trywait(self.handle) except OSError as e: if e.errno != errno.EAGAIN: raise return True else: sem_post(self.handle) return False else: return semlock_getvalue(self, space) == 0 class W_SemLock(W_Root): def __init__(self, space, handle, kind, maxvalue, name): self.handle = handle self.kind = kind self.count = 0 self.maxvalue = maxvalue self.register_finalizer(space) self.last_tid = -1 self.name = name def name_get(self, space): if self.name is None: return space.w_None return space.newtext(self.name) def kind_get(self, space): return space.newint(self.kind) def maxvalue_get(self, space): return space.newint(self.maxvalue) def handle_get(self, space): h = rffi.cast(rffi.INTPTR_T, self.handle) return space.newint(h) def get_count(self, space): return space.newint(self.count) def _ismine(self): return self.count > 0 and rthread.get_ident() == self.last_tid def is_mine(self, space): return space.newbool(self._ismine()) def is_zero(self, space): try: res = semlock_iszero(self, space) except OSError as e: raise wrap_oserror(space, e) return space.newbool(res) def get_value(self, space): try: val = semlock_getvalue(self, space) except OSError as e: raise wrap_oserror(space, e) return space.newint(val) @unwrap_spec(block=bool) def acquire(self, space, block=True, w_timeout=None): # check whether we already own the lock if self.kind == RECURSIVE_MUTEX and self._ismine(): self.count += 1 return space.w_True try: # sets self.last_tid and increments self.count # those steps need to be as close as possible to # acquiring the semlock for self._ismine() to support # multiple threads got = semlock_acquire(self, space, block, w_timeout) except OSError as e: raise wrap_oserror(space, e) if got: return space.w_True else: return space.w_False def release(self, space): if self.kind == RECURSIVE_MUTEX: if not self._ismine(): raise oefmt(space.w_AssertionError, "attempt to release recursive lock not owned by " "thread") if self.count > 1: self.count -= 1 return try: # Note: a succesful semlock_release() must not release the GIL, # otherwise there is a race condition on self.count semlock_release(self, space) self.count -= 1 except OSError as e: raise wrap_oserror(space, e) def after_fork(self): self.count = 0 @unwrap_spec(kind=int, maxvalue=int, name='text_or_none') def rebuild(space, w_cls, w_handle, kind, maxvalue, name): # if sys_platform != 'win32' and name is not None: # like CPython, in this case ignore 'w_handle' try: handle = reopen_semaphore(name) except OSError as e: raise wrap_oserror(space, e) else: handle = handle_w(space, w_handle) # self = space.allocate_instance(W_SemLock, w_cls) self.__init__(space, handle, kind, maxvalue, name) return self def enter(self, space): return self.acquire(space, w_timeout=space.w_None) def exit(self, space, __args__): self.release(space) def _finalize_(self): delete_semaphore(self.handle) @unwrap_spec(kind=int, value=int, maxvalue=int, name='text', unlink=int) def descr_new(space, w_subtype, kind, value, maxvalue, name, unlink): if kind != 
RECURSIVE_MUTEX and kind != SEMAPHORE: raise oefmt(space.w_ValueError, "unrecognized kind") counter = space.fromcache(CounterState).getCount() try: handle = create_semaphore(space, name, value, maxvalue) if unlink: sem_unlink(name) name = None except OSError as e: raise wrap_oserror(space, e) self = space.allocate_instance(W_SemLock, w_subtype) self.__init__(space, handle, kind, maxvalue, name) return self W_SemLock.typedef = TypeDef( "SemLock", __new__ = interp2app(descr_new), kind = GetSetProperty(W_SemLock.kind_get), maxvalue = GetSetProperty(W_SemLock.maxvalue_get), handle = GetSetProperty(W_SemLock.handle_get), name = GetSetProperty(W_SemLock.name_get), _count = interp2app(W_SemLock.get_count), _is_mine = interp2app(W_SemLock.is_mine), _is_zero = interp2app(W_SemLock.is_zero), _get_value = interp2app(W_SemLock.get_value), acquire = interp2app(W_SemLock.acquire), release = interp2app(W_SemLock.release), _rebuild = interp2app(W_SemLock.rebuild.im_func, as_classmethod=True), _after_fork = interp2app(W_SemLock.after_fork), __enter__=interp2app(W_SemLock.enter), __exit__=interp2app(W_SemLock.exit), SEM_VALUE_MAX=SEM_VALUE_MAX, ) def _check_signals(space): space.getexecutioncontext().checksignals() uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/interp_win32_py3.py000066400000000000000000000032231455552142400304170ustar00rootroot00000000000000from rpython.rtyper.lltypesystem import rffi from rpython.rlib._rsocket_rffi import socketclose, geterrno, socketrecv, send from rpython.rlib import rwin32 from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec def getWindowsError(space): errno = geterrno() message = rwin32.FormatErrorW(errno) w_errcode = space.newint(errno) return OperationError(space.w_WindowsError, space.newtuple([w_errcode, space.newtext(*message), space.w_None, w_errcode])) @unwrap_spec(handle=int) def multiprocessing_closesocket(space, handle): res = socketclose(handle) if res != 0: raise getWindowsError(space) @unwrap_spec(handle=int, buffersize=int) def multiprocessing_recv(space, handle, buffersize): with rffi.scoped_alloc_buffer(buffersize) as buf: read_bytes = socketrecv(handle, buf.raw, buffersize, 0) if read_bytes >= 0: return space.newbytes(buf.str(read_bytes)) raise getWindowsError(space) @unwrap_spec(handle=int, data='bufferstr') def multiprocessing_send(space, handle, data): if data is None: raise OperationError(space.w_ValueError, 'data cannot be None') with rffi.scoped_nonmovingbuffer(data) as dataptr: # rsocket checks for writability of socket with wait_for_data, cpython does check res = send(handle, dataptr, len(data), 0) if res < 0: raise getWindowsError(space) return space.newint(res) def handle_w(space, w_handle): return rffi.cast(rwin32.HANDLE, space.int_w(w_handle)) _GetTickCount = rwin32.winexternal( 'GetTickCount', [], rwin32.DWORD) uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/moduledef.py000066400000000000000000000010531455552142400272440ustar00rootroot00000000000000import sys from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): interpleveldefs = { 'SemLock' : 'interp_semaphore.W_SemLock', } appleveldefs = { } if sys.platform == 'win32': interpleveldefs['closesocket'] = 'interp_win32_py3.multiprocessing_closesocket' interpleveldefs['recv'] = 'interp_win32_py3.multiprocessing_recv' interpleveldefs['send'] = 'interp_win32_py3.multiprocessing_send' interpleveldefs['sem_unlink'] = 'interp_semaphore.semaphore_unlink' 
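A minimal sketch of the application-level surface that the moduledef above exposes, mirroring how the tests that follow exercise it. The kind constant and the semaphore name used here are illustrative assumptions (in this module RECURSIVE_MUTEX/SEMAPHORE are small integer kinds, and any unused name works), not values taken from a specific test.

# Hypothetical usage sketch of the interp-level SemLock as seen from app level.
# Assumes SEMAPHORE == 1 (matching the kind enum in interp_semaphore) and an
# arbitrary placeholder name; unlink=True removes the named semaphore at once.
from _multiprocessing import SemLock

SEMAPHORE = 1                      # assumed kind value for a counting semaphore
sem = SemLock(SEMAPHORE, 1, 1, "/example-sem-0", unlink=True)
assert sem._count() == 0           # no acquisitions yet
sem.acquire()                      # take the semaphore; _is_mine() is now True
assert sem._count() == 1
sem.release()                      # releasing again here would raise ValueError
with sem:                          # __enter__/__exit__ map to acquire/release
    assert sem._count() == 1
assert sem._count() == 0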
uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/test/000077500000000000000000000000001455552142400257065ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/test/__init__.py000066400000000000000000000000001455552142400300050ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/test/test_interp_semaphore.py000066400000000000000000000042471455552142400326720ustar00rootroot00000000000000import pytest import time import sys from rpython.rlib.rgil import yield_thread from pypy.interpreter.gateway import interp2app from pypy.module.thread.os_lock import _set_sentinel from pypy.module.thread.os_thread import start_new_thread from pypy.module._multiprocessing.interp_semaphore import ( create_semaphore, delete_semaphore, W_SemLock, sem_unlink) @pytest.mark.skipif(sys.platform == 'win32', reason='hangs on win32') @pytest.mark.parametrize('spaceconfig', [ {'usemodules': ['_multiprocessing', 'thread']}]) @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semlock_release(space): # trigger the setup() code in time.moduledef space.getbuiltinmodule('time') sem_name = '/test8' _handle = create_semaphore(space, sem_name, 1, 1) try: sem_unlink(sem_name) w_lock = W_SemLock(space, _handle, 0, 1, None) created = [] successful = [] N_THREADS = 16 def run(space): w_sentinel = _set_sentinel(space) yield_thread() w_sentinel.descr_lock_acquire(space) # releases GIL try: yield_thread() created.append(w_sentinel) w_got = w_lock.acquire(space, w_timeout=space.newfloat(5.)) # releases GIL if space.is_true(w_got): yield_thread() w_lock.release(space) successful.append(w_sentinel) except: import traceback traceback.print_exc() raise w_run = space.wrap(interp2app(run)) w_lock.acquire(space) for _ in range(N_THREADS): start_new_thread(space, w_run, space.newtuple([])) # releases GIL deadline = time.time() + 5. 
while len(created) < N_THREADS: assert time.time() < deadline yield_thread() w_lock.release(space) for w_sentinel in created: # Join thread w_sentinel.descr_lock_acquire(space) # releases GIL w_sentinel.descr_lock_release(space) assert len(successful) == N_THREADS finally: delete_semaphore(_handle) uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/test/test_semaphore.py000066400000000000000000000147421455552142400313120ustar00rootroot00000000000000import py import sys import pytest from pypy.module._multiprocessing.interp_semaphore import ( RECURSIVE_MUTEX, SEMAPHORE) class AppTestSemaphore: spaceconfig = dict(usemodules=('_multiprocessing', 'thread', 'signal', 'select', 'binascii', 'struct', '_posixsubprocess')) if sys.platform == 'win32': spaceconfig['usemodules'] += ('_rawffi', '_cffi_backend') else: spaceconfig['usemodules'] += ('fcntl',) def setup_class(cls): cls.w_SEMAPHORE = cls.space.wrap(SEMAPHORE) cls.w_RECURSIVE = cls.space.wrap(RECURSIVE_MUTEX) cls.w_runappdirect = cls.space.wrap(cls.runappdirect) @py.test.mark.skipif("sys.platform == 'win32'") def test_sem_unlink(self): from _multiprocessing import sem_unlink import errno try: sem_unlink("non-existent") except OSError as e: assert e.errno in (errno.ENOENT, errno.EINVAL) else: assert 0, "should have raised" def test_semaphore_basic(self): from _multiprocessing import SemLock import sys assert SemLock.SEM_VALUE_MAX > 10 kind = self.SEMAPHORE value = 1 maxvalue = 1 # the following line gets OSError: [Errno 38] Function not implemented # if /dev/shm is not mounted on Linux sem = SemLock(kind, value, maxvalue, "1", unlink=True) assert sem.kind == kind assert sem.maxvalue == maxvalue assert isinstance(sem.handle, int) assert sem.name is None assert sem._count() == 0 if sys.platform == 'darwin': raises(NotImplementedError, 'sem._get_value()') else: assert sem._get_value() == 1 assert sem._is_zero() == False sem.acquire() assert sem._is_mine() assert sem._count() == 1 if sys.platform == 'darwin': raises(NotImplementedError, 'sem._get_value()') else: assert sem._get_value() == 0 assert sem._is_zero() == True sem.release() assert sem._count() == 0 sem.acquire() sem._after_fork() assert sem._count() == 0 @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_recursive(self): from _multiprocessing import SemLock kind = self.RECURSIVE value = 1 maxvalue = 1 # the following line gets OSError: [Errno 38] Function not implemented # if /dev/shm is not mounted on Linux sem = SemLock(kind, value, maxvalue, "2", unlink=True) sem.acquire() sem.release() assert sem._count() == 0 sem.acquire() sem.release() # now recursively sem.acquire() sem.acquire() assert sem._count() == 2 sem.release() sem.release() @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_maxvalue(self): from _multiprocessing import SemLock import sys kind = self.SEMAPHORE value = SemLock.SEM_VALUE_MAX maxvalue = SemLock.SEM_VALUE_MAX sem = SemLock(kind, value, maxvalue, "3.0", unlink=True) for i in range(10): res = sem.acquire() assert res == True assert sem._count() == i+1 if sys.platform != 'darwin': assert sem._get_value() == maxvalue - (i+1) value = 0 maxvalue = SemLock.SEM_VALUE_MAX sem = SemLock(kind, value, maxvalue, "3.1", unlink=True) for i in range(10): sem.release() assert sem._count() == -(i+1) if sys.platform != 'darwin': assert sem._get_value() == i+1 @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_wait(self): from _multiprocessing import SemLock 
kind = self.SEMAPHORE value = 1 maxvalue = 1 sem = SemLock(kind, value, maxvalue, "3", unlink=True) res = sem.acquire() assert res == True res = sem.acquire(timeout=0.1) assert res == False @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_rebuild(self): import sys if sys.platform == 'win32': from _multiprocessing import SemLock def sem_unlink(*args): pass else: from _multiprocessing import SemLock, sem_unlink kind = self.SEMAPHORE value = 1 maxvalue = 1 sem = SemLock(kind, value, maxvalue, "4.2", unlink=False) try: sem2 = SemLock._rebuild(-1, kind, value, "4.2") #assert sem.handle != sem2.handle---even though they come # from different calls to sem_open(), on Linux at least, # they are the same pointer sem2 = SemLock._rebuild(sem.handle, kind, value, None) assert sem.handle == sem2.handle finally: sem_unlink("4.2") @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_contextmanager(self): from _multiprocessing import SemLock kind = self.SEMAPHORE value = 1 maxvalue = 1 sem = SemLock(kind, value, maxvalue, "5", unlink=True) with sem: assert sem._count() == 1 assert sem._count() == 0 def test_unlink(self): from _multiprocessing import SemLock sem = SemLock(self.SEMAPHORE, 1, 1, '/mp-123', unlink=True) assert sem._count() == 0 @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_in_threads(self): from _multiprocessing import SemLock from threading import Thread from time import sleep l = SemLock(0, 1, 1, "6", unlink=True) if self.runappdirect: def f(id): for i in range(10000): pass else: def f(id): for i in range(1000): # reduce the probability of thread switching # at exactly the wrong time in semlock_acquire for j in range(10): pass threads = [Thread(None, f, args=(i,)) for i in range(2)] [t.start() for t in threads] # if the RLock calls to sem_wait and sem_post do not match, # one of the threads will block and the call to join will fail [t.join() for t in threads] uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/test/test_win32.py000066400000000000000000000016211455552142400302610ustar00rootroot00000000000000import py import sys @py.test.mark.skipif('sys.platform != "win32"') class AppTestWin32: spaceconfig = dict(usemodules=('_multiprocessing', '_cffi_backend', 'signal', '_rawffi', 'binascii', '_socket', 'select')) def setup_class(cls): # import here since importing _multiprocessing imports multiprocessing # (in interp_connection) to get the BufferTooShort exception, which on # win32 imports msvcrt which imports via cffi which allocates ccharp # that are never released. 
This trips up the LeakChecker if done in a # test function cls.w_multiprocessing = cls.space.appexec([], '(): import multiprocessing as m; return m') def test_closesocket(self): from _multiprocessing import closesocket raises(WindowsError, closesocket, -1) uqfoundation-multiprocess-b3457a5/pypy3.8/module/_multiprocess/test/test_ztranslation.py000066400000000000000000000001651455552142400320510ustar00rootroot00000000000000from pypy.objspace.fake.checkmodule import checkmodule def test_checkmodule(): checkmodule('_multiprocessing') uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/000077500000000000000000000000001455552142400233035ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/__init__.py000066400000000000000000000035001455552142400254120ustar00rootroot00000000000000# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Original: Copyright (c) 2006-2008, R Oudkerk # Original: Licensed to PSF under a Contributor Agreement. # Forked by Mike McKerns, to support enhanced serialization. # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) sys.path.append(root) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(root, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) del os, sys, root, get_license_text, get_readme_as_rst import sys from . import context # # Copy stuff from default context # __all__ = [x for x in dir(context._default_context) if not x.startswith('_')] globals().update((name, getattr(context._default_context, name)) for name in __all__) # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def license(): """print license""" print (__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/connection.py000066400000000000000000000761431455552142400260270ustar00rootroot00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] import io import os import sys import socket import struct import time import tempfile import itertools try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import util from . import AuthenticationError, BufferTooShort from .context import reduction _ForkingPickler = reduction.ForkingPickler try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE except ImportError: if sys.platform == 'win32': raise _winapi = None # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... 
CONNECTION_TIMEOUT = 20. _mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return getattr(time,'monotonic',time.time)() + timeout def _check_timeout(t): return getattr(time,'monotonic',time.time)() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter)), dir="") else: raise ValueError('unrecognized family') def _validate_family(family): ''' Checks if the family is valid for the current environment. ''' if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) if sys.platform == 'win32' and family == 'AF_UNIX': # double check if not hasattr(socket, family): raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str or util.is_abstract_socket_namespace(address): return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _ConnectionBase: _handle = None def __init__(self, handle, readable=True, writable=True): handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) # HACK for byte-indexing of non-bytewise buffers (e.g. 
array.array) if m.itemsize > 1: m = memoryview(bytes(m)) n = len(m) if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(_ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. """ self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[offset // itemsize : (offset + size) // itemsize]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return _ForkingPickler.loads(buf.getbuffer()) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. 
""" _got_empty_message = False def _close(self, _CloseHandle=_winapi.CloseHandle): _CloseHandle(self._handle) def _send_bytes(self, buf): ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nwritten, err = ov.GetOverlappedResult(True) assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile(self._handle, bsize, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). """ if _winapi: def _close(self, _close=_multiprocessing.closesocket): _close(self._handle) _write = _multiprocessing.send _read = _multiprocessing.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: n = write(self._handle, buf) remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: chunk = read(handle, remaining) n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf): n = len(buf) if n > 0x7fffffff: pre_header = struct.pack("!i", -1) header = struct.pack("!Q", n) self._send(pre_header) self._send(header) self._send(buf) else: # For wire compatibility with 3.7 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. 
self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if size == -1: buf = self._recv(8) size, = struct.unpack("!Q", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. ''' listener = self._listener if listener is not None: self._listener = None listener.close() @property def address(self): return self._listener._address @property def last_accepted(self): return self._listener._last_accepted def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = Connection(s1.detach()) c2 = Connection(s2.detach()) else: fd1, fd2 = os.pipe() c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = 
overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): # Linux abstract socket namespaces do not need to be explicitly unlinked self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() s.setblocking(True) return Connection(s.detach()) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) with socket.socket( getattr(socket, family) ) as s: s.setblocking(True) s.connect(address) return Connection(s.detach()) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: res = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = b'#CHALLENGE#' WELCOME = b'#WELCOME#' FAILURE = b'#FAILURE#' def deliver_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message, 'md5').digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message, 'md5').digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signalled. (Only # returning the first signalled might create starvation issues.) 
L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res+1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: ov, err = None, e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed... if ov and sys.getwindowsversion()[:2] >= (6, 2): # ... except on Windows 8 and later, where # the message HAS been consumed. try: _, err = ov.GetOverlappedResult(False) except OSError as e: err = e.winerror if not err and hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [o for o in object_list if o in ready_objects] else: import selectors # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). if hasattr(selectors, 'PollSelector'): _WaitSelector = selectors.PollSelector else: _WaitSelector = selectors.SelectSelector def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' with _WaitSelector() as selector: for obj in object_list: selector.register(obj, selectors.EVENT_READ) if timeout is not None: deadline = getattr(time,'monotonic',time.time)() + timeout while True: ready = selector.select(timeout) if ready: return [key.fileobj for (key, events) in ready] else: if timeout is not None: timeout = deadline - getattr(time,'monotonic',time.time)() if timeout < 0: return ready # # Make connection and socket objects sharable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . 
import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(sock.detach(), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): handle = dh.detach() return PipeConnection(handle, readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): fd = df.detach() return Connection(fd, readable, writable) reduction.register(Connection, reduce_connection) uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/context.py000066400000000000000000000260061455552142400253450ustar00rootroot00000000000000import os import sys import threading from . import process from . import reduction __all__ = () # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError current_process = staticmethod(process.current_process) parent_process = staticmethod(process.parent_process) active_children = staticmethod(process.active_children) def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, lock=True): '''Returns a synchronized shared object''' from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *, lock=True): '''Returns a synchronized shared array''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . 
import connection def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. ''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) from None ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property def reducer(self): '''Controls how objects will be reduced to a form that can be shared with other processes.''' return globals().get('reduction') @reducer.setter def reducer(self, reduction): globals()['reduction'] = reduction def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super().get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): if sys.platform == 'win32': return ['spawn'] else: methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] if reduction.HAVE_SEND_HANDLE: methods.append('forkserver') return methods # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): if not reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } if sys.platform == 'darwin': # bpo-33725: running 
arbitrary code after fork() is no longer reliable # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn else: _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/dummy/000077500000000000000000000000001455552142400244365ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/dummy/__init__.py000066400000000000000000000057651455552142400265640ustar00rootroot00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): if self._parent is not current_process(): raise RuntimeError( "Parent is {0!r} but current_process is {1!r}".format( self._parent, current_process())) self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def 
__init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/dummy/connection.py000066400000000000000000000030761455552142400271550ustar00rootroot00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe' ] from queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None @property def address(self): return self._backlog_queue def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False with self._in.not_empty: self._in.not_empty.wait(timeout) return self._in.qsize() > 0 def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/forkserver.py000066400000000000000000000303601455552142400260470ustar00rootroot00000000000000import errno import os import selectors import signal import socket import struct import sys import threading import warnings from . import connection from . import process from .context import reduction from . import resource_tracker from . import spawn from . 
import util __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def _stop(self): # Method used by unit tests to stop the server with self._lock: self._stop_unlocked() def _stop_unlocked(self): if self._forkserver_pid is None: return # close the "alive" file descriptor asks the server to stop os.close(self._forkserver_alive_fd) self._forkserver_alive_fd = None os.waitpid(self._forkserver_pid, 0) self._forkserver_pid = None if not util.is_abstract_socket_namespace(self._forkserver_address): os.unlink(self._forkserver_address) self._forkserver_address = None def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in self._preload_modules): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. ''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, resource_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. ''' with self._lock: resource_tracker.ensure_running() if self._forkserver_pid is not None: # forkserver was launched before, is it still running? pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) if not pid: # still alive return # dead, launch it again os.close(self._forkserver_alive_fd) self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None cmd = ('from multiprocess.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = {x: y for x, y in data.items() if x in desired_keys} else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) if not util.is_abstract_socket_namespace(address): os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. 
alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] pid = util.spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w self._forkserver_pid = pid # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass util._close_stdin() sig_r, sig_w = os.pipe() os.set_blocking(sig_r, False) os.set_blocking(sig_w, False) def sigchld_handler(*_unused): # Dummy signal handler, doesn't do anything pass handlers = { # unblocking SIGCHLD allows the wakeup fd to notify our event loop signal.SIGCHLD: sigchld_handler, # protect the process from ^C signal.SIGINT: signal.SIG_IGN, } old_handlers = {sig: signal.signal(sig, val) for (sig, val) in handlers.items()} # calling os.write() in the Python signal handler is racy signal.set_wakeup_fd(sig_w) # map child pids to client fds pid_to_fd = {} with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) selector.register(sig_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: # EOF because no more client processes left assert os.read(alive_r, 1) == b'', "Not at EOF?" 
raise SystemExit if sig_r in rfds: # Got SIGCHLD os.read(sig_r, 65536) # exhaust while True: # Scan for child processes try: pid, sts = os.waitpid(-1, os.WNOHANG) except ChildProcessError: break if pid == 0: break child_w = pid_to_fd.pop(pid, None) if child_w is not None: if os.WIFSIGNALED(sts): returncode = -os.WTERMSIG(sts) else: if not os.WIFEXITED(sts): raise AssertionError( "Child {0:n} status is {1:n}".format( pid,sts)) returncode = os.WEXITSTATUS(sts) # Send exit code to client process try: write_signed(child_w, returncode) except BrokenPipeError: # client vanished pass os.close(child_w) else: # This shouldn't happen really warnings.warn('forkserver: waitpid returned ' 'unexpected pid %d' % pid) if listener in rfds: # Incoming fork request with listener.accept()[0] as s: # Receive fds from client fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) if len(fds) > MAXFDS_TO_SEND: raise RuntimeError( "Too many ({0:n}) fds to send".format( len(fds))) child_r, child_w, *fds = fds s.close() pid = os.fork() if pid == 0: # Child code = 1 try: listener.close() selector.close() unused_fds = [alive_r, child_w, sig_r, sig_w] unused_fds.extend(pid_to_fd.values()) code = _serve_one(child_r, fds, unused_fds, old_handlers) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) else: # Send pid to client process try: write_signed(child_w, pid) except BrokenPipeError: # client vanished pass pid_to_fd[pid] = child_w os.close(child_r) for fd in fds: os.close(fd) except OSError as e: if e.errno != errno.ECONNABORTED: raise def _serve_one(child_r, fds, unused_fds, handlers): # close unnecessary stuff and reset signal handlers signal.set_wakeup_fd(-1) for sig, val in handlers.items(): signal.signal(sig, val) for fd in unused_fds: os.close(fd) (_forkserver._forkserver_alive_fd, resource_tracker._resource_tracker._fd, *_forkserver._inherited_fds) = fds # Run process object received over pipe parent_sentinel = os.dup(child_r) code = spawn._main(child_r, parent_sentinel) return code # # Read and write signed numbers # def read_signed(fd): data = b'' length = SIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return SIGNED_STRUCT.unpack(data)[0] def write_signed(fd, n): msg = SIGNED_STRUCT.pack(n) while msg: nbytes = os.write(fd, msg) if nbytes == 0: raise RuntimeError('should not get here') msg = msg[nbytes:] # # # _forkserver = ForkServer() ensure_running = _forkserver.ensure_running get_inherited_fds = _forkserver.get_inherited_fds connect_to_new_process = _forkserver.connect_to_new_process set_forkserver_preload = _forkserver.set_forkserver_preload uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/heap.py000066400000000000000000000265521455552142400246040ustar00rootroot00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import bisect from collections import defaultdict import mmap import os import sys import tempfile import threading from .context import reduction, assert_spawning from . import util __all__ = ['BufferWrapper'] # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': import _winapi class Arena(object): """ A shared memory area backed by anonymous memory (Windows). 
""" _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if _winapi.GetLastError() == 0: break # We have reopened a preexisting mmap. buf.close() else: raise FileExistsError('Cannot find name for new mmap') self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state # Reopen existing mmap self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. See issue 23060 #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS else: class Arena(object): """ A shared memory area backed by a temporary file (POSIX). """ if sys.platform == 'linux': _dir_candidates = ['/dev/shm'] else: _dir_candidates = [] def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: # Arena is created anew (if fd != -1, it means we're coming # from rebuild_arena() below) self.fd, name = tempfile.mkstemp( prefix='pym-%d-'%os.getpid(), dir=self._choose_dir(size)) os.unlink(name) util.Finalize(self, os.close, (self.fd,)) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def _choose_dir(self, size): # Choose a non-storage backed directory if possible, # to improve performance for d in self._dir_candidates: st = os.statvfs(d) if st.f_bavail * st.f_frsize >= size: # enough free space? return d return util.get_temp_dir() def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because ' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): # Minimum malloc() alignment _alignment = 8 _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() # Current arena allocation size self._size = size # A sorted list of available block sizes in arenas self._lengths = [] # Free block management: # - map each block size to a list of `(Arena, start, stop)` blocks self._len_to_seq = {} # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block # starting at that offset self._start_to_block = {} # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block # ending at that offset self._stop_to_block = {} # Map arenas to their `(Arena, start, stop)` blocks in use self._allocated_blocks = defaultdict(set) self._arenas = [] # List of pending blocks to free - see comment in free() below self._pending_free_blocks = [] # Statistics self._n_mallocs = 0 self._n_frees = 0 @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _new_arena(self, size): # Create a new arena with at least the given *size* length = self._roundup(max(self._size, size), mmap.PAGESIZE) # We carve larger and larger arenas, for efficiency, until we # reach a large-ish size (roughly L3 cache-sized) if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) def 
_discard_arena(self, arena): # Possibly delete the given (unused) arena length = arena.size # Reusing an existing arena is faster than creating a new one, so # we only reclaim space if it's large enough. if length < self._DISCARD_FREE_SPACE_LARGER_THAN: return blocks = self._allocated_blocks.pop(arena) assert not blocks del self._start_to_block[(arena, 0)] del self._stop_to_block[(arena, length)] self._arenas.remove(arena) seq = self._len_to_seq[length] seq.remove((arena, 0, length)) if not seq: del self._len_to_seq[length] self._lengths.remove(length) def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): return self._new_arena(size) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _add_free_block(self, block): # make block available and try to merge with its neighbours in the arena (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _remove_allocated_block(self, block): arena, start, stop = block blocks = self._allocated_blocks[arena] blocks.remove((start, stop)) if not blocks: # Arena is entirely free, discard it from this process self._discard_arena(arena) def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held. while True: try: block = self._pending_free_blocks.pop() except IndexError: break self._add_free_block(block) self._remove_allocated_block(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
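        # A compact sketch of the trylock pattern just described (a
        # restatement for orientation, not additional library code):
        #
        #     if not self._lock.acquire(False):             # would deadlock if we blocked
        #         self._pending_free_blocks.append(block)   # defer to a later malloc()/free()
        #     else:
        #         try:
        #             ...   # free this block plus any previously deferred ones
        #         finally:
        #             self._lock.release()
        #
        # which is exactly the shape of the code that follows.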
if os.getpid() != self._lastpid: raise ValueError( "My pid ({0:n}) is not last pid {1:n}".format( os.getpid(),self._lastpid)) if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._n_frees += 1 self._free_pending_blocks() self._add_free_block(block) self._remove_allocated_block(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._n_mallocs += 1 # allow pending blocks to be marked available self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) real_stop = start + size if real_stop < stop: # if the returned block is larger than necessary, mark # the remainder available self._add_free_block((arena, real_stop, stop)) self._allocated_blocks[arena].add((start, real_stop)) return (arena, start, real_stop) # # Class wrapping a block allocated out of a Heap -- can be inherited by child process # class BufferWrapper(object): _heap = Heap() def __init__(self, size): if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start+size] uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/managers.py000066400000000000000000001375211455552142400254630ustar00rootroot00000000000000# # Module providing manager classes for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token', 'SharedMemoryManager' ] # # Imports # import sys import threading import signal import array import queue import time import os from os import getpid from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen, ProcessError from . import pool from . import process from . import util from . import get_context try: from . 
import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] if view_types[0] is not list: # only needed in Py3.0 def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) # # Type for identifying shared objects # class Token(object): ''' Type to uniquely identify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): if not isinstance(result, str): raise TypeError( "Result {0!r} (kind '{1}') type is {2}, not str".format( result, kind, type(result))) if kind == '#UNSERIALIZABLE': return RemoteError('Unserializable message: %s\n' % result) else: return RemoteError(result) else: return ValueError('Unrecognized message type {!r}'.format(kind)) class RemoteError(Exception): def __str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): if not isinstance(authkey, bytes): raise TypeError( "Authkey {0!r} is type {1!s}, not bytes".format( authkey, type(authkey))) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if sys.stdout != sys.__stdout__: # what about stderr? 
util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def handle_request(self, c): ''' Handle a new connection ''' funcname = result = request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) c.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = \ self.id_to_local_proxy_obj[ident] except KeyError as second_ke: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception as e: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' # Perhaps include debug info about 'c'? 
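        # Hedged usage sketch (hypothetical client-side session): this report
        # is normally reached through the manager's private helper, e.g.
        #
        #     import multiprocess as mp
        #     m = mp.Manager()
        #     d = m.dict({'a': 1})
        #     print(m._debug_info())   # one "<ident>: refcount=<n> ..." entry per object
        #
        # it exists purely to help spot refcounting problems.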
with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' return len(self.id_to_refcount) def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(*args, **kwds): ''' Create a new shared object and return its id ''' if len(args) >= 3: self, c, typeid, *args = args elif not args: raise TypeError("descriptor 'create' of 'Server' object " "needs an argument") else: if 'typeid' not in kwds: raise TypeError('create expected at least 2 positional ' 'arguments, got %d' % (len(args)-1)) typeid = kwds.pop('typeid') if len(args) >= 2: self, c, *args = args import warnings warnings.warn("Passing 'typeid' as keyword argument is deprecated", DeprecationWarning, stacklevel=2) else: if 'c' not in kwds: raise TypeError('create expected at least 2 positional ' 'arguments, got %d' % (len(args)-1)) c = kwds.pop('c') self, *args = args import warnings warnings.warn("Passing 'c' as keyword argument is deprecated", DeprecationWarning, stacklevel=2) args = tuple(args) with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: if kwds or (len(args) != 1): raise ValueError( "Without callable, must have one non-keyword argument") obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: if not isinstance(method_to_typeid, dict): raise TypeError( "Method_to_typeid {0!r}: type {1!s}, not dict".format( method_to_typeid, type(method_to_typeid))) exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) create.__text_signature__ = '($self, c, typeid, /, *args, **kwds)' def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: # If no external references exist but an internal (to the # manager) still does and a new external reference is created # from it, restore the manager's tracking of it from the # previously stashed internal ref. 
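            # Hedged example of how this arises (hypothetical user code; the
            # mechanism is RebuildProxy stashing the inner proxy's referent in
            # id_to_local_proxy_obj when it is unpickled on the server):
            #
            #     m = multiprocess.Manager()
            #     outer = m.list()
            #     outer.append(m.dict())   # inner dict proxy is rebuilt inside
            #                              # the manager process itself
            #
            # once every external reference to the inner dict is dropped, only
            # the stashed internal reference remains; the branch below restores
            # refcount tracking if a new external proxy is created from it.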
if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = \ self.id_to_local_proxy_obj[ident] obj, exposed, gettypeid = self.id_to_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if ident not in self.id_to_refcount and \ ident in self.id_to_local_proxy_obj: util.debug('Server DECREF skipping %r', ident) return with self.mutex: if self.id_to_refcount[ident] <= 0: raise AssertionError( "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( ident, self.id_to_obj[ident], self.id_to_refcount[ident])) self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: # Two-step process in case the object turns out to contain other # proxy objects (e.g. a managed list of managed lists). # Otherwise, deleting self.id_to_obj[ident] would trigger the # deleting of the stored value (another managed object) which would # in turn attempt to acquire the mutex that is already held here. self.id_to_obj[ident] = (None, (), None) # thread-safe util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { #XXX: register dill? 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, 
self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' # bpo-36368: protect server process from KeyboardInterrupt signals signal.signal(signal.SIGINT, signal.SIG_IGN) if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, /, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() if self._state.value != State.STARTED: if self._state.value == State.INITIAL: raise ProcessError("Unable to start server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=0.1) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass @property def address(self): return self._address @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: 
cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): # isinstance? assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, /, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] # Should be set to True only when a proxy object is being created # on the manager server; primary use case: nested proxy objects. # RebuildProxy detects when a proxy is being created on the manager # and sets this value appropriately. 
self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... 
decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = \ server.id_to_obj[token.id] incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, /, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def get(self): return 
self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): _exposed_ = ('__next__', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self, n=1): return self._callmethod('notify', (n,)) def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', 
'__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) DictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__' )) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocess.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) # # Definition of SharedMemoryManager and SharedMemoryServer # if HAS_SHMEM: class _SharedMemoryTracker: "Manages one or more shared memory segments." def __init__(self, name, segment_names=[]): self.shared_memory_context_name = name self.segment_names = segment_names def register_segment(self, segment_name): "Adds the supplied shared memory block name to tracker." util.debug(f"Register segment {segment_name!r} in pid {getpid()}") self.segment_names.append(segment_name) def destroy_segment(self, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the list of blocks being tracked.""" util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") self.segment_names.remove(segment_name) segment = shared_memory.SharedMemory(segment_name) segment.close() segment.unlink() def unlink(self): "Calls destroy_segment() on all tracked shared memory blocks." 
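        # Hedged usage sketch for the machinery this tracker supports
        # (user-level code, shown only for orientation):
        #
        #     from multiprocess.managers import SharedMemoryManager
        #     with SharedMemoryManager() as smm:
        #         shm = smm.SharedMemory(size=128)       # tracked segment
        #         sl = smm.ShareableList(range(10))      # another tracked segment
        #     # leaving the block shuts the server down, and shutdown() ends up
        #     # calling unlink() on every segment tracked here.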
for segment_name in self.segment_names[:]: self.destroy_segment(segment_name) def __del__(self): util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") self.unlink() def __getstate__(self): return (self.shared_memory_context_name, self.segment_names) def __setstate__(self, state): self.__init__(*state) class SharedMemoryServer(Server): public = Server.public + \ ['track_segment', 'release_segment', 'list_segments'] def __init__(self, *args, **kwargs): Server.__init__(self, *args, **kwargs) address = self.address # The address of Linux abstract namespaces can be bytes if isinstance(address, bytes): address = os.fsdecode(address) self.shared_memory_context = \ _SharedMemoryTracker(f"shm_{address}_{getpid()}") util.debug(f"SharedMemoryServer started by pid {getpid()}") def create(*args, **kwargs): """Create a new distributed-shared object (not backed by a shared memory block) and return its id to be used in a Proxy Object.""" # Unless set up as a shared proxy, don't make shared_memory_context # a standard part of kwargs. This makes things easier for supplying # simple functions. if len(args) >= 3: typeid = args[2] elif 'typeid' in kwargs: typeid = kwargs['typeid'] elif not args: raise TypeError("descriptor 'create' of 'SharedMemoryServer' " "object needs an argument") else: raise TypeError('create expected at least 2 positional ' 'arguments, got %d' % (len(args)-1)) if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): kwargs['shared_memory_context'] = self.shared_memory_context return Server.create(*args, **kwargs) create.__text_signature__ = '($self, c, typeid, /, *args, **kwargs)' def shutdown(self, c): "Call unlink() on all tracked shared memory, terminate the Server." self.shared_memory_context.unlink() return Server.shutdown(self, c) def track_segment(self, c, segment_name): "Adds the supplied shared memory block name to Server's tracker." self.shared_memory_context.register_segment(segment_name) def release_segment(self, c, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the tracker instance inside the Server.""" self.shared_memory_context.destroy_segment(segment_name) def list_segments(self, c): """Returns a list of names of shared memory blocks that the Server is currently tracking.""" return self.shared_memory_context.segment_names class SharedMemoryManager(BaseManager): """Like SyncManager but uses SharedMemoryServer instead of Server. It provides methods for creating and returning SharedMemory instances and for creating a list-like object (ShareableList) backed by shared memory. It also provides methods that create and return Proxy Objects that support synchronization across processes (i.e. multi-process-safe locks and semaphores). """ _Server = SharedMemoryServer def __init__(self, *args, **kwargs): if os.name == "posix": # bpo-36867: Ensure the resource_tracker is running before # launching the manager process, so that concurrent # shared_memory manipulation both in the manager and in the # current process does not create two resource_tracker # processes. from . 
import resource_tracker resource_tracker.ensure_running() BaseManager.__init__(self, *args, **kwargs) util.debug(f"{self.__class__.__name__} created by pid {getpid()}") def __del__(self): util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") pass def get_server(self): 'Better than monkeypatching for now; merge into Server ultimately' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started SharedMemoryServer") elif self._state.value == State.SHUTDOWN: raise ProcessError("SharedMemoryManager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self._Server(self._registry, self._address, self._authkey, self._serializer) def SharedMemory(self, size): """Returns a new SharedMemory instance with the specified size in bytes, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sms = shared_memory.SharedMemory(None, create=True, size=size) try: dispatch(conn, None, 'track_segment', (sms.name,)) except BaseException as e: sms.unlink() raise e return sms def ShareableList(self, sequence): """Returns a new ShareableList instance populated with the values from the input sequence, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sl = shared_memory.ShareableList(sequence) try: dispatch(conn, None, 'track_segment', (sl.shm.name,)) except BaseException as e: sl.shm.unlink() raise e return sl uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/pool.py000066400000000000000000000773751455552142400246510ustar00rootroot00000000000000# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import collections import itertools import os import queue import threading import time import traceback import warnings from queue import Empty # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError from .connection import wait # # Constants representing the state of a pool # INIT = "INIT" RUN = "RUN" CLOSE = "CLOSE" TERMINATE = "TERMINATE" # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): if (maxtasks is not None) and not (isinstance(maxtasks, int) and maxtasks >= 1): raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception and func is not _helper_reraises_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 util.debug('worker exiting after %d tasks' % completed) def _helper_reraises_exception(ex): 'Pickle-able helper function for use by _guarded_task_generation.' raise ex # # Class representing a process pool # class _PoolCache(dict): """ Class that implements a cache for the Pool class that will notify the pool management threads every time the cache is emptied. The notification is done by the use of a queue that is provided when instantiating the cache. """ def __init__(self, /, *args, notifier=None, **kwds): self.notifier = notifier super().__init__(*args, **kwds) def __delitem__(self, item): super().__delitem__(item) # Notify that the cache is empty. This is important because the # pool keeps maintaining workers until the cache gets drained. This # eliminates a race condition in which a task is finished after the # the pool's _handle_workers method has enter another iteration of the # loop. In this situation, the only event that can wake up the pool # is the cache to be emptied (no more tasks available). if not self: self.notifier.put(None) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' _wrap_exception = True @staticmethod def Process(ctx, *args, **kwds): return ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): # Attributes initialized early to make sure that they exist in # __del__() if __init__() raises an exception self._pool = [] self._state = INIT self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.SimpleQueue() # The _change_notifier queue exist to wake up self._handle_workers() # when the cache (self._cache) is empty or when there is a change in # the _state variable of the thread that runs _handle_workers. 
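        # Hedged illustration of the wake-up path (attribute names are the real
        # ones used below; the scenario itself is hypothetical):
        #
        #     del pool._cache[job]       # last pending result just collected
        #       -> _PoolCache.__delitem__ puts None on pool._change_notifier
        #       -> wait([outqueue reader, change_notifier reader]) inside
        #          _handle_workers returns, so the handler thread re-checks the
        #          cache and its _state instead of blocking indefinitely.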
self._change_notifier = self._ctx.SimpleQueue() self._cache = _PoolCache(notifier=self._change_notifier) self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes try: self._repopulate_pool() except Exception: for p in self._pool: if p.exitcode is None: p.terminate() for p in self._pool: p.join() raise sentinels = self._get_sentinels() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self._cache, self._taskqueue, self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception, sentinels, self._change_notifier) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._change_notifier, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) self._state = RUN # Copy globals as function locals to make sure that they are available # during Python shutdown when the Pool is destroyed. def __del__(self, _warn=warnings.warn, RUN=RUN): if self._state == RUN: _warn(f"unclosed running multiprocessing pool {self!r}", ResourceWarning, source=self) if getattr(self, '_change_notifier', None) is not None: self._change_notifier.put(None) def __repr__(self): cls = self.__class__ return (f'<{cls.__module__}.{cls.__qualname__} ' f'state={self._state} ' f'pool_size={len(self._pool)}>') def _get_sentinels(self): task_queue_sentinels = [self._outqueue._reader] self_notifier_sentinels = [self._change_notifier._reader] return [*task_queue_sentinels, *self_notifier_sentinels] @staticmethod def _get_worker_sentinels(workers): return [worker.sentinel for worker in workers if hasattr(worker, "sentinel")] @staticmethod def _join_exited_workers(pool): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. """ cleaned = False for i in reversed(range(len(pool))): worker = pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del pool[i] return cleaned def _repopulate_pool(self): return self._repopulate_pool_static(self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) @staticmethod def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. 
""" for i in range(processes - len(pool)): w = Process(ctx, target=worker, args=(inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() pool.append(w) util.debug('added worker') @staticmethod def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Clean up any exited workers and start replacements for them. """ if Pool._join_exited_workers(pool): Pool._repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _check_running(self): if self._state != RUN: raise ValueError("Pool not running") def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. Pool must be running. ''' return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def _guarded_task_generation(self, result_job, func, iterable): '''Provides a generator of tasks for imap and imap_unordered with appropriate handling for iterables which throw exceptions during iteration.''' try: i = -1 for i, x in enumerate(iterable): yield (result_job, i, func, (x,), {}) except Exception as e: yield (result_job, i+1, _helper_reraises_exception, (e,), {}) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. ''' self._check_running() if chunksize == 1: result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0:n}".format( chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. 
''' self._check_running() if chunksize == 1: result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0!r}".format(chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' self._check_running() result = ApplyResult(self, callback, error_callback) self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' self._check_running() if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapper, task_batches), None ) ) return result @staticmethod def _wait_for_updates(sentinels, change_notifier, timeout=None): wait(sentinels, timeout=timeout) while not change_notifier.empty(): change_notifier.get() @classmethod def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception, sentinels, change_notifier): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
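        # In other words: keep looping while the pool is in the RUN state;
        # after close() keep looping only while jobs remain in the cache;
        # terminate() (state TERMINATE) ends the loop regardless of pending work.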
while thread._state == RUN or (cache and thread._state != TERMINATE): cls._maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] cls._wait_for_updates(current_sentinels, change_notifier) # send sentinel to stop workers taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None try: # iterating taskseq cannot fail for task in taskseq: if thread._state != RUN: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, idx = task[:2] try: cache[job]._set(idx, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') idx = task[1] if task else -1 set_length(idx + 1) continue break finally: task = taskseq = job = None else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state != RUN: assert thread._state == TERMINATE, "Thread not in TERMINATE" util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
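            # The loop below therefore drains at most ten queued results,
            # leaving room for the remaining sentinels to be put without
            # blocking.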
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (OSError, EOFError):
                pass

        util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        util.debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE
            self._change_notifier.put(None)

    def terminate(self):
        util.debug('terminating pool')
        self._state = TERMINATE
        self._terminate()

    def join(self):
        util.debug('joining pool')
        if self._state == RUN:
            raise ValueError("Pool is still running")
        elif self._state not in (CLOSE, TERMINATE):
            raise ValueError("In unknown state")
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        util.debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        util.debug('finalizing pool')

        # Notify that the worker_handler state has been changed so the
        # _handle_workers loop can be unblocked (and exited) in order to
        # send the finalization sentinel to all the workers.
        worker_handler._state = TERMINATE
        change_notifier.put(None)

        task_handler._state = TERMINATE

        util.debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        if (not result_handler.is_alive()) and (len(cache) != 0):
            raise AssertionError(
                "Cannot have cache with result_handler not alive")

        result_handler._state = TERMINATE
        change_notifier.put(None)
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        util.debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
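        # (The hasattr() checks below are presumably there because ThreadPool
        # workers are thread-backed dummy Process objects, which may not
        # provide a terminate() method.)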
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): self._check_running() return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, pool, callback, error_callback): self._pool = pool self._event = threading.Event() self._job = next(job_counter) self._cache = pool._cache self._callback = callback self._error_callback = error_callback self._cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): if not self.ready(): raise ValueError("{0!r} not ready".format(self)) return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] self._pool = None AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, pool, chunksize, length, callback, error_callback): ApplyResult.__init__(self, pool, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del self._cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): self._number_left -= 1 success, result = success_result if success and self._success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() self._pool = None else: if not success and self._success: # only store first exception self._success = False self._value = result if self._number_left == 0: # only consider the result ready once all jobs are done if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() self._pool = None # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, pool): self._pool = pool self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = pool._cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} self._cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None raise TimeoutError from None 
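        # `item` is the (success, value) pair produced by worker(): the
        # function's return value on success, or the exception it raised.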
success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] self._pool = None def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] self._pool = None # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] self._pool = None # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(ctx, *args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.SimpleQueue() self._outqueue = queue.SimpleQueue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _get_sentinels(self): return [self._change_notifier._reader] @staticmethod def _get_worker_sentinels(workers): return [] @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # drain inqueue, and put sentinels at its head to make workers finish try: while True: inqueue.get(block=False) except queue.Empty: pass for i in range(size): inqueue.put(None) def _wait_for_updates(self, sentinels, change_notifier, timeout): time.sleep(timeout) uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/popen_fork.py000066400000000000000000000050051455552142400260170ustar00rootroot00000000000000import os import signal from . import util __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' def __init__(self, process_obj): util._flush_std_streams() self.returncode = None self.finalizer = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except OSError as e: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) else: assert os.WIFEXITED(sts), "Status is {:n}".format(sts) self.returncode = os.WEXITSTATUS(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from multiprocess.connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def _send_signal(self, sig): if self.returncode is None: try: os.kill(self.pid, sig) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def terminate(self): self._send_signal(signal.SIGTERM) def kill(self): self._send_signal(signal.SIGKILL) def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) os.close(parent_w) code = process_obj._bootstrap(parent_sentinel=child_r) finally: os._exit(code) else: os.close(child_w) os.close(child_r) self.finalizer = util.Finalize(self, util.close_fds, (parent_r, parent_w,)) self.sentinel = parent_r def close(self): if self.finalizer is not None: self.finalizer() uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/popen_forkserver.py000066400000000000000000000042631455552142400272530ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen if not reduction.HAVE_SEND_HANDLE: raise ImportError('No support for sending fds between processes') from . import forkserver from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) # Keep a duplicate of the data pipe's write end as a sentinel of the # parent process used by the child process. _parent_w = os.dup(w) self.finalizer = util.Finalize(self, util.close_fds, (_parent_w, self.sentinel)) with open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_signed(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from multiprocess.connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_signed(self.sentinel) except (OSError, EOFError): # This should not happen usually, but perhaps the forkserver # process itself got killed self.returncode = 255 return self.returncode uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/popen_spawn_posix.py000066400000000000000000000037551455552142400274420ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): from . 
import resource_tracker tracker_fd = resource_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds) self.sentinel = parent_r with open(parent_w, 'wb', closefd=False) as f: f.write(fp.getbuffer()) finally: fds_to_close = [] for fd in (parent_r, parent_w): if fd is not None: fds_to_close.append(fd) self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) for fd in (child_r, child_w): if fd is not None: os.close(fd) uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/popen_spawn_win32.py000066400000000000000000000076531455552142400272430ustar00rootroot00000000000000import os import msvcrt import signal import sys import _winapi from .context import reduction, get_spawning_popen, set_spawning_popen from . import spawn from . import util __all__ = ['Popen'] # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") def _path_eq(p1, p2): return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) WINENV = not _path_eq(sys.executable, sys._base_executable) def _close_handles(*handles): for handle in handles: _winapi.CloseHandle(handle) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' def __init__(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be duplicated by the child process # -- see spawn_main() in spawn.py. # # bpo-33929: Previously, the read end of pipe was "stolen" by the child # process, but it leaked a handle if the child process had been # terminated before it could steal the handle from the parent process. rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) cmd = ' '.join('"%s"' % x for x in cmd) python_exe = spawn.get_executable() # bpo-35797: When running in a venv, we bypass the redirect # executor and launch our base Python. 
if WINENV and _path_eq(python_exe, sys.executable): python_exe = sys._base_executable env = os.environ.copy() env["__PYVENV_LAUNCHER__"] = sys.executable else: env = None with open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = _winapi.CreateProcess( python_exe, cmd, None, None, False, 0, env, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) self.finalizer = util.Finalize(self, _close_handles, (self.sentinel, int(rhandle))) # send information to child set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: set_spawning_popen(None) def duplicate_for_child(self, handle): assert self is get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except OSError: if self.wait(timeout=1.0) is None: raise kill = terminate def close(self): self.finalizer() uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/process.py000066400000000000000000000273371455552142400253470ustar00rootroot00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
# __all__ = ['BaseProcess', 'current_process', 'active_children', 'parent_process'] # # Imports # import os import sys import signal import itertools import threading from _weakrefset import WeakSet # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_children) def parent_process(): ''' Return process object representing the parent process ''' return _parent_process # # # def _cleanup(): # check for processes which have finished for p in list(_children): if p._popen.poll() is not None: _children.discard(p) # # The `Process` class # class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analogous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count,) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._parent_name = _current_process.name self._popen = None self._closed = False self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) if daemon is not None: self.daemon = daemon _dangling.add(self) def _check_closed(self): if self._closed: raise ValueError("process object is closed") def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' self._check_closed() assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._check_closed() self._popen.terminate() def kill(self): ''' Terminate process; sends SIGKILL signal or uses TerminateProcess() ''' self._check_closed() self._popen.kill() def join(self, timeout=None): ''' Wait until child process terminates ''' self._check_closed() assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) def is_alive(self): ''' Return whether process is alive ''' self._check_closed() if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _children.discard(self) return False def close(self): ''' Close the Process object. This method releases resources held by the Process object. 
It is an error to call this method if the child process is still running. ''' if self._popen is not None: if self._popen.poll() is None: raise ValueError("Cannot close a process while it is still running. " "You should first call join() or terminate().") self._popen.close() self._popen = None del self._sentinel _children.discard(self) self._closed = True @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, str), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' self._check_closed() if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' self._check_closed() if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None def __repr__(self): exitcode = None if self is _current_process: status = 'started' elif self._closed: status = 'closed' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: exitcode = self._popen.poll() if exitcode is not None: status = 'stopped' else: status = 'started' info = [type(self).__name__, 'name=%r' % self._name] if self._popen is not None: info.append('pid=%s' % self._popen.pid) info.append('parent=%s' % self._parent_pid) info.append(status) if exitcode is not None: exitcode = _exitcode_to_name.get(exitcode, exitcode) info.append('exitcode=%s' % exitcode) if self.daemon: info.append('daemon') return '<%s>' % ' '.join(info) ## def _bootstrap(self, parent_sentinel=None): from . 
import util, context global _current_process, _parent_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() util._close_stdin() old_process = _current_process _current_process = self _parent_process = _ParentProcess( self._parent_name, self._parent_pid, parent_sentinel) if threading._HAVE_THREAD_NATIVE_ID: threading.main_thread()._set_native_id() try: util._finalizer_registry.clear() util._run_after_forkers() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as e: if not e.args: exitcode = 1 elif isinstance(e.args[0], int): exitcode = e.args[0] else: sys.stderr.write(str(e.args[0]) + '\n') exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: threading._shutdown() util.info('process exiting with exitcode %d' % exitcode) util._flush_std_streams() return exitcode # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the parent process # class _ParentProcess(BaseProcess): def __init__(self, name, pid, sentinel): self._identity = () self._name = name self._pid = pid self._parent_pid = None self._popen = None self._closed = False self._sentinel = sentinel self._config = {} def is_alive(self): from multiprocess.connection import wait return not wait([self._sentinel], timeout=0) @property def ident(self): return self._pid def join(self, timeout=None): ''' Wait until parent process terminates ''' from multiprocess.connection import wait wait([self._sentinel], timeout=timeout) pid = ident # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._closed = False self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} # Note that some versions of FreeBSD only allow named # semaphores to have names of up to 14 characters. Therefore # we choose a short prefix. # # On MacOSX in a sandbox it may be necessary to use a # different prefix -- see #19478. # # Everything in self._config will be inherited by descendant # processes. def close(self): pass _parent_process = None _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in list(signal.__dict__.items()): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = f'-{name}' # For debug and leak testing _dangling = WeakSet() uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/queues.py000066400000000000000000000270361455552142400251740ustar00rootroot00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
# __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import weakref import errno from queue import Empty, Full try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import connection from . import context _ForkingPickler = context.reduction.ForkingPickler from .util import debug, info, Finalize, register_after_fork, is_exiting # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0, *, ctx): if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._after_fork() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._after_fork() def _after_fork(self): debug('Queue._after_fork()') self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = getattr(time,'monotonic',time.time)() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - getattr(time,'monotonic',time.time)() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True try: self._reader.close() finally: close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed, "Queue {0!r} not closed".format(self) if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread 
which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._writer.close, self._ignore_epipe, self._on_queue_feeder_error, self._sem), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe, onerror, queue_sem): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') close() return # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as e: if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: # Since the object has not been sent in the queue, we need # to decrease the size of the queue. The error acts as # if the object had been silently removed from the queue # and this step is necessary to have a properly working # queue. queue_sem.release() onerror(e, obj) @staticmethod def _on_queue_feeder_error(e, obj): """ Private API hook called when feeding data in the background thread raises an exception. For overriding by concurrent.futures. """ import traceback traceback.print_exc() _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. 
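# A minimal usage sketch of that contract (illustrative only; assumes the
# top-level multiprocess.JoinableQueue() constructor):
#
#     q = multiprocess.JoinableQueue()
#     q.put(item)                              # producer, once per task
#     obj = q.get(); work(obj); q.task_done()  # consumer, once per get()
#     q.join()                                 # returns once every put() is matched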
# class JoinableQueue(Queue): def __init__(self, maxsize=0, *, ctx): Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty, self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self, *, ctx): self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._poll = self._reader.poll def get(self): with self._rlock: res = self._reader.recv_bytes() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def put(self, obj): # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(obj) else: with self._wlock: self._writer.send_bytes(obj) uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/reduction.py000066400000000000000000000226451455552142400256620ustar00rootroot00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from abc import ABCMeta import copyreg import functools import io import os try: import dill as pickle except ImportError: import pickle import socket import sys from . 
import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocess.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None, *args, **kwds): buf = io.BytesIO() cls(buf, protocol, *args, **kwds).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None, *args, **kwds): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol, *args, **kwds).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False, *, source_process=None): '''Duplicate a handle. (target_process is a handle not a pid!)''' current_process = _winapi.GetCurrentProcess() if source_process is None: source_process = current_process if target_process is None: target_process = current_process return _winapi.DuplicateHandle( source_process, handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
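        # The DUPLICATE_CLOSE_SOURCE flag passed below also closes the handle
        # in the process that currently owns it, which is one reason detach()
        # must only be called once.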
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) if len(a) % 256 != msg[0]: raise AssertionError( "Len is {0:n} but msg[0] is {1!r}".format( len(a), msg[0])) return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) class AbstractReducer(metaclass=ABCMeta): '''Abstract base class for use in implementing a Reduction class suitable for use in replacing the standard reduction mechanism used in multiprocess.''' ForkingPickler = ForkingPickler register = register dump = dump send_handle = send_handle recv_handle = recv_handle if sys.platform == 'win32': steal_handle = steal_handle duplicate = duplicate DupHandle = DupHandle else: sendfds = sendfds recvfds = recvfds DupFd = DupFd _reduce_method = _reduce_method _reduce_method_descriptor = _reduce_method_descriptor _rebuild_partial = _rebuild_partial _reduce_socket = _reduce_socket _rebuild_socket = _rebuild_socket def __init__(self, *args): register(type(_C().f), _reduce_method) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) register(functools.partial, _reduce_partial) register(socket.socket, _reduce_socket) uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/resource_sharer.py000066400000000000000000000123501455552142400270510ustar00rootroot00000000000000# # We use a background thread for sharing fds on Unix, and for sharing sockets on # Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. # import os import signal import socket import sys import threading from . import process from .context import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. 
This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resources using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._old_locks = [] self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() # If self._lock was locked at the time of the fork, it may be broken # -- see issue 6721. Replace it without letting it be gc'ed. self._old_locks.append(self._lock) self._lock = threading.Lock() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None, "Already have Listener" util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/resource_tracker.py000066400000000000000000000207701455552142400272250ustar00rootroot00000000000000############################################################################### # Server process to keep track of unlinked resources (like shared memory # segments, semaphores etc.) and clean them. 
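# The tracker's wire protocol (see _send() and main() below) is one ASCII line
# per request, of the form "CMD:name:rtype\n", where CMD is REGISTER,
# UNREGISTER or PROBE and rtype selects a cleanup function from _CLEANUP_FUNCS.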
# # On Unix we run a server process which keeps track of unlinked # resources. The server ignores SIGINT and SIGTERM and reads from a # pipe. Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining resource names. # # This is important because there may be system limits for such resources: for # instance, the system only supports a limited number of named semaphores, and # shared-memory segments live in the RAM. If a python process leaks such a # resource, this resource will not be removed till the next reboot. Without # this resource tracker process, "killall python" would probably leave unlinked # resources. import os import signal import sys import threading import warnings from . import spawn from . import util __all__ = ['ensure_running', 'register', 'unregister'] _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) _CLEANUP_FUNCS = { 'noop': lambda: None, } if os.name == 'posix': try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import _posixshmem _CLEANUP_FUNCS.update({ 'semaphore': _multiprocessing.sem_unlink, 'shared_memory': _posixshmem.shm_unlink, }) class ResourceTracker(object): def __init__(self): self._lock = threading.Lock() self._fd = None self._pid = None def _stop(self): with self._lock: if self._fd is None: # not running return # closing the "alive" file descriptor stops main() os.close(self._fd) self._fd = None os.waitpid(self._pid, 0) self._pid = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that resource tracker process is running. This can be run from any process. Usually a child process will use the resource created by its parent.''' with self._lock: if self._fd is not None: # resource tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) # Clean-up to avoid dangling processes. try: # _pid can be None if this process is a child from another # python process, which has started the resource_tracker. if self._pid is not None: os.waitpid(self._pid, 0) except ChildProcessError: # The resource_tracker has already been terminated. pass self._fd = None self._pid = None warnings.warn('resource_tracker: process died unexpectedly, ' 'relaunching. Some resources might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from multiprocess.resource_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] # bpo-33613: Register a signal mask that will block the signals. # This signal mask will be inherited by the child that is going # to be spawned and will protect the child from a race condition # that can make the child die before it registers signal handlers # for SIGINT and SIGTERM. The mask is unregistered after spawning # the child. 
try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = util.spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r) def _check_alive(self): '''Check that the pipe has not been closed by sending a probe.''' try: # We cannot use send here as it calls ensure_running, creating # a cycle. os.write(self._fd, b'PROBE:0:noop\n') except OSError: return False else: return True def register(self, name, rtype): '''Register name of resource with resource tracker.''' self._send('REGISTER', name, rtype) def unregister(self, name, rtype): '''Unregister name of resource with resource tracker.''' self._send('UNREGISTER', name, rtype) def _send(self, cmd, name, rtype): self.ensure_running() msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') if len(name) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('name too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( nbytes, len(msg)) _resource_tracker = ResourceTracker() ensure_running = _resource_tracker.ensure_running register = _resource_tracker.register unregister = _resource_tracker.unregister getfd = _resource_tracker.getfd def main(fd): '''Run resource tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} try: # keep track of registered/unregistered resources with open(fd, 'rb') as f: for line in f: try: cmd, name, rtype = line.strip().decode('ascii').split(':') cleanup_func = _CLEANUP_FUNCS.get(rtype, None) if cleanup_func is None: raise ValueError( f'Cannot register {name} for automatic cleanup: ' f'unknown resource type {rtype}') if cmd == 'REGISTER': cache[rtype].add(name) elif cmd == 'UNREGISTER': cache[rtype].remove(name) elif cmd == 'PROBE': pass else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining resources for rtype, rtype_cache in cache.items(): if rtype_cache: try: warnings.warn('resource_tracker: There appear to be %d ' 'leaked %s objects to clean up at shutdown' % (len(rtype_cache), rtype)) except Exception: pass for name in rtype_cache: # For some reason the process which created and registered this # resource has failed to unregister it. Presumably it has # died. We therefore unlink it. try: try: _CLEANUP_FUNCS[rtype](name) except Exception as e: warnings.warn('resource_tracker: %r: %s' % (name, e)) finally: pass uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/shared_memory.py000066400000000000000000000420161455552142400265160ustar00rootroot00000000000000"""Provides shared memory for direct access across processes. The API of this package is currently provisional. Refer to the documentation for details. 
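A minimal illustrative sketch, assuming the package is importable as
``multiprocess``:

    from multiprocess import shared_memory
    shm = shared_memory.SharedMemory(create=True, size=16)
    shm.buf[:5] = b'hello'                            # write via the memoryview
    peer = shared_memory.SharedMemory(name=shm.name)  # attach by name elsewhere
    bytes(peer.buf[:5])                               # -> b'hello'
    peer.close()
    shm.close()
    shm.unlink()                                      # destroy the block exactly once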
""" __all__ = [ 'SharedMemory', 'ShareableList' ] from functools import partial import mmap import os import errno import struct import secrets if os.name == "nt": import _winapi _USE_POSIX = False else: import _posixshmem _USE_POSIX = True _O_CREX = os.O_CREAT | os.O_EXCL # FreeBSD (and perhaps other BSDs) limit names to 14 characters. _SHM_SAFE_NAME_LENGTH = 14 # Shared memory block name prefix if _USE_POSIX: _SHM_NAME_PREFIX = '/psm_' else: _SHM_NAME_PREFIX = 'wnsm_' def _make_filename(): "Create a random filename for the shared memory object." # number of random bytes to use for name nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 assert nbytes >= 2, '_SHM_NAME_PREFIX too long' name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) assert len(name) <= _SHM_SAFE_NAME_LENGTH return name class SharedMemory: """Creates a new shared memory block or attaches to an existing shared memory block. Every shared memory block is assigned a unique name. This enables one process to create a shared memory block with a particular name so that a different process can attach to that same shared memory block using that same name. As a resource for sharing data across processes, shared memory blocks may outlive the original process that created them. When one process no longer needs access to a shared memory block that might still be needed by other processes, the close() method should be called. When a shared memory block is no longer needed by any process, the unlink() method should be called to ensure proper cleanup.""" # Defaults; enables close() and unlink() to run without errors. _name = None _fd = -1 _mmap = None _buf = None _flags = os.O_RDWR _mode = 0o600 _prepend_leading_slash = True if _USE_POSIX else False def __init__(self, name=None, create=False, size=0): if not size >= 0: raise ValueError("'size' must be a positive integer") if create: self._flags = _O_CREX | os.O_RDWR if size == 0: raise ValueError("'size' must be a positive number different from zero") if name is None and not self._flags & os.O_EXCL: raise ValueError("'name' can only be None if create=True") if _USE_POSIX: # POSIX Shared Memory if name is None: while True: name = _make_filename() try: self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) except FileExistsError: continue self._name = name break else: name = "/" + name if self._prepend_leading_slash else name self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) self._name = name try: if create and size: os.ftruncate(self._fd, size) stats = os.fstat(self._fd) size = stats.st_size self._mmap = mmap.mmap(self._fd, size) except OSError: self.unlink() raise from .resource_tracker import register register(self._name, "shared_memory") else: # Windows Named Shared Memory if create: while True: temp_name = _make_filename() if name is None else name # Create and reserve shared memory block with this name # until it can be attached to by mmap. 
h_map = _winapi.CreateFileMapping( _winapi.INVALID_HANDLE_VALUE, _winapi.NULL, _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF, size & 0xFFFFFFFF, temp_name ) try: last_error_code = _winapi.GetLastError() if last_error_code == _winapi.ERROR_ALREADY_EXISTS: if name is not None: raise FileExistsError( errno.EEXIST, os.strerror(errno.EEXIST), name, _winapi.ERROR_ALREADY_EXISTS ) else: continue self._mmap = mmap.mmap(-1, size, tagname=temp_name) finally: _winapi.CloseHandle(h_map) self._name = temp_name break else: self._name = name # Dynamically determine the existing named shared memory # block's size which is likely a multiple of mmap.PAGESIZE. h_map = _winapi.OpenFileMapping( _winapi.FILE_MAP_READ, False, name ) try: p_buf = _winapi.MapViewOfFile( h_map, _winapi.FILE_MAP_READ, 0, 0, 0 ) finally: _winapi.CloseHandle(h_map) size = _winapi.VirtualQuerySize(p_buf) self._mmap = mmap.mmap(-1, size, tagname=name) self._size = size self._buf = memoryview(self._mmap) def __del__(self): try: self.close() except OSError: pass def __reduce__(self): return ( self.__class__, ( self.name, False, self.size, ), ) def __repr__(self): return f'{self.__class__.__name__}({self.name!r}, size={self.size})' @property def buf(self): "A memoryview of contents of the shared memory block." return self._buf @property def name(self): "Unique name that identifies the shared memory block." reported_name = self._name if _USE_POSIX and self._prepend_leading_slash: if self._name.startswith("/"): reported_name = self._name[1:] return reported_name @property def size(self): "Size in bytes." return self._size def close(self): """Closes access to the shared memory from this instance but does not destroy the shared memory block.""" if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() self._mmap = None if _USE_POSIX and self._fd >= 0: os.close(self._fd) self._fd = -1 def unlink(self): """Requests that the underlying shared memory block be destroyed. In order to ensure proper cleanup of resources, unlink should be called once (and only once) across all processes which have access to the shared memory block.""" if _USE_POSIX and self._name: from .resource_tracker import unregister _posixshmem.shm_unlink(self._name) unregister(self._name, "shared_memory") _encoding = "utf8" class ShareableList: """Pattern for a mutable list-like object shareable via a shared memory block. It differs from the built-in list type in that these lists can not change their overall length (i.e. no append, insert, etc.) 
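    An illustrative sketch, assuming the package is importable as
    ``multiprocess``:

        from multiprocess.shared_memory import ShareableList
        sl = ShareableList([4096, 'spam', None])
        peer = ShareableList(name=sl.shm.name)  # attach by name elsewhere
        peer[1] = 'eggs'     # must fit within the originally allocated bytes
        sl[1]                # -> 'eggs'
        peer.shm.close()
        sl.shm.close()
        sl.shm.unlink()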
Because values are packed into a memoryview as bytes, the struct packing format for any storable value must require no more than 8 characters to describe its format.""" _types_mapping = { int: "q", float: "d", bool: "xxxxxxx?", str: "%ds", bytes: "%ds", None.__class__: "xxxxxx?x", } _alignment = 8 _back_transforms_mapping = { 0: lambda value: value, # int, float, bool 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str 2: lambda value: value.rstrip(b'\x00'), # bytes 3: lambda _value: None, # None } @staticmethod def _extract_recreation_code(value): """Used in concert with _back_transforms_mapping to convert values into the appropriate Python objects when retrieving them from the list as well as when storing them.""" if not isinstance(value, (str, bytes, None.__class__)): return 0 elif isinstance(value, str): return 1 elif isinstance(value, bytes): return 2 else: return 3 # NoneType def __init__(self, sequence=None, *, name=None): if sequence is not None: _formats = [ self._types_mapping[type(item)] if not isinstance(item, (str, bytes)) else self._types_mapping[type(item)] % ( self._alignment * (len(item) // self._alignment + 1), ) for item in sequence ] self._list_len = len(_formats) assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len self._allocated_bytes = tuple( self._alignment if fmt[-1] != "s" else int(fmt[:-1]) for fmt in _formats ) _recreation_codes = [ self._extract_recreation_code(item) for item in sequence ] requested_size = struct.calcsize( "q" + self._format_size_metainfo + "".join(_formats) + self._format_packing_metainfo + self._format_back_transform_codes ) else: requested_size = 8 # Some platforms require > 0. if name is not None and sequence is None: self.shm = SharedMemory(name) else: self.shm = SharedMemory(name, create=True, size=requested_size) if sequence is not None: _enc = _encoding struct.pack_into( "q" + self._format_size_metainfo, self.shm.buf, 0, self._list_len, *(self._allocated_bytes) ) struct.pack_into( "".join(_formats), self.shm.buf, self._offset_data_start, *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) ) struct.pack_into( self._format_packing_metainfo, self.shm.buf, self._offset_packing_formats, *(v.encode(_enc) for v in _formats) ) struct.pack_into( self._format_back_transform_codes, self.shm.buf, self._offset_back_transform_codes, *(_recreation_codes) ) else: self._list_len = len(self) # Obtains size from offset 0 in buffer. self._allocated_bytes = struct.unpack_from( self._format_size_metainfo, self.shm.buf, 1 * 8 ) def _get_packing_format(self, position): "Gets the packing format for a single value stored in the list." position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") v = struct.unpack_from( "8s", self.shm.buf, self._offset_packing_formats + position * 8 )[0] fmt = v.rstrip(b'\x00') fmt_as_str = fmt.decode(_encoding) return fmt_as_str def _get_back_transform(self, position): "Gets the back transformation function for a single value." 
position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") transform_code = struct.unpack_from( "b", self.shm.buf, self._offset_back_transform_codes + position )[0] transform_function = self._back_transforms_mapping[transform_code] return transform_function def _set_packing_format_and_transform(self, position, fmt_as_str, value): """Sets the packing format and back transformation code for a single value in the list at the specified position.""" position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") struct.pack_into( "8s", self.shm.buf, self._offset_packing_formats + position * 8, fmt_as_str.encode(_encoding) ) transform_code = self._extract_recreation_code(value) struct.pack_into( "b", self.shm.buf, self._offset_back_transform_codes + position, transform_code ) def __getitem__(self, position): try: offset = self._offset_data_start \ + sum(self._allocated_bytes[:position]) (v,) = struct.unpack_from( self._get_packing_format(position), self.shm.buf, offset ) except IndexError: raise IndexError("index out of range") back_transform = self._get_back_transform(position) v = back_transform(v) return v def __setitem__(self, position, value): try: offset = self._offset_data_start \ + sum(self._allocated_bytes[:position]) current_format = self._get_packing_format(position) except IndexError: raise IndexError("assignment index out of range") if not isinstance(value, (str, bytes)): new_format = self._types_mapping[type(value)] encoded_value = value else: encoded_value = (value.encode(_encoding) if isinstance(value, str) else value) if len(encoded_value) > self._allocated_bytes[position]: raise ValueError("bytes/str item exceeds available storage") if current_format[-1] == "s": new_format = current_format else: new_format = self._types_mapping[str] % ( self._allocated_bytes[position], ) self._set_packing_format_and_transform( position, new_format, value ) struct.pack_into(new_format, self.shm.buf, offset, encoded_value) def __reduce__(self): return partial(self.__class__, name=self.shm.name), () def __len__(self): return struct.unpack_from("q", self.shm.buf, 0)[0] def __repr__(self): return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' @property def format(self): "The struct packing format used by all currently stored values." return "".join( self._get_packing_format(i) for i in range(self._list_len) ) @property def _format_size_metainfo(self): "The struct packing format used for metainfo on storage sizes." return f"{self._list_len}q" @property def _format_packing_metainfo(self): "The struct packing format used for the values' packing formats." return "8s" * self._list_len @property def _format_back_transform_codes(self): "The struct packing format used for the values' back transforms." return "b" * self._list_len @property def _offset_data_start(self): return (self._list_len + 1) * 8 # 8 bytes per "q" @property def _offset_packing_formats(self): return self._offset_data_start + sum(self._allocated_bytes) @property def _offset_back_transform_codes(self): return self._offset_packing_formats + self._list_len * 8 def count(self, value): "L.count(value) -> integer -- return number of occurrences of value." return sum(value == entry for entry in self) def index(self, value): """L.index(value) -> integer -- return first index of value. 
Raises ValueError if the value is not present.""" for position, entry in enumerate(self): if value == entry: return position else: raise ValueError(f"{value!r} not in this container") uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/sharedctypes.py000066400000000000000000000142421455552142400263560ustar00rootroot00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import ctypes import weakref from . import heap from . import get_context from .context import reduction, assert_spawning _ForkingPickler = reduction.ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=True, ctx=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): ''' Return a synchronization wrapper for a RawArray ''' obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = {name: make_property(name) for name in names} classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, 
ctx) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length _ForkingPickler.register(type_, reduce_ctype) buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/spawn.py000066400000000000000000000221151455552142400250060ustar00rootroot00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import sys import runpy import types from . import get_start_method, set_start_method from . import process from .context import reduction from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
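# An illustrative sketch of overriding it from an embedding application,
# assuming the package is importable as ``multiprocess`` (the path below is
# hypothetical):
#
#     from multiprocess import spawn
#     spawn.set_executable('/opt/myapp/bin/python3')
#     spawn.get_executable()   # -> '/opt/myapp/bin/python3'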
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = getattr(sys, 'frozen', False) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe def get_executable(): return _python_exe # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--multiprocessing-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv), "Not forking" if sys.platform == 'win32': import msvcrt import _winapi if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid) else: source_process = None new_handle = reduction.duplicate(pipe_handle, source_process=source_process) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process else: from . import resource_tracker resource_tracker._resource_tracker._fd = tracker_fd fd = pipe_handle parent_sentinel = os.dup(pipe_handle) exitcode = _main(fd, parent_sentinel) sys.exit(exitcode) def _main(fd, parent_sentinel): with os.fdopen(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = reduction.pickle.load(from_parent) prepare(preparation_data) self = reduction.pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap(parent_sentinel) def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... 
The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable.''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path=sys.path.copy() try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] main_mod_name = getattr(main_module.__spec__, "name", None) if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. 
We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/synchronize.py000066400000000000000000000270671455552142400262440ustar00rootroot00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import sys import tempfile try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import time from . import context from . import process from . import util # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
# See issue 3770 try: from _multiprocess import SemLock, sem_unlink except (ImportError): try: from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, *, ctx): if ctx is None: ctx = context._default_context.get_context() name = ctx.get_start_method() unlink_now = sys.platform == 'win32' or name == 'fork' for i in range(100): try: sl = self._semlock = _multiprocessing.SemLock( kind, value, maxvalue, self._make_name(), unlink_now) except FileExistsError: pass else: break else: raise FileExistsError('cannot find name for semaphore') util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if self._semlock.name is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .resource_tracker import register register(self._semlock.name, "semaphore") util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .resource_tracker import unregister sem_unlink(name) unregister(name, "semaphore") def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: h = sl.handle return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % \ (self.__class__.__name__, value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 
'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) # # Recursive lock # class RLock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None, *, ctx): self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def notify(self, n=1): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire( False), ('notify: Should not have been able to acquire ' + '_wait_semaphore') # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res, ('notify: Bug in sleeping_count.acquire' + '- res should not be False') sleepers = 0 while sleepers < n and self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def notify_all(self): self.notify(n=sys.maxsize) def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() 
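        # the loop above re-checks the predicate (with the lock re-acquired by
        # wait()) after every wake-up; it ends once the predicate turns truthy or
        # the remaining waittime is exhausted, and the last value is returned below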
return result # # Event # class Event(object): def __init__(self, *, ctx): self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False # # Barrier # class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, *, ctx): import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): self._array[1] = value uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/tests/000077500000000000000000000000001455552142400244455ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/tests/__init__.py000066400000000000000000005622441455552142400265730ustar00rootroot00000000000000# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import time import io import itertools import sys import os import gc import errno import signal import array import socket import random import logging import subprocess import struct import operator import pickle #XXX: use dill? import weakref import warnings import test.support import test.support.script_helper from test import support # Skip tests if _multiprocessing wasn't built. _multiprocessing = test.support.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. 
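# test.support.import_module() raises unittest.SkipTest instead of propagating
# ImportError, so the whole test module is skipped on such platforms.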
test.support.import_module('multiprocess.synchronize') import threading import multiprocess as multiprocessing import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocess import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None # # # # Don't ignore user's installed packages ENV = dict(__cleanenv = False, __isolated = False) # Timeout to wait until a process completes #XXX: travis-ci TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused support.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocess import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocess.connection import wait def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." 
% nsems_min) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = getattr(time,'monotonic',time.time)() try: return self.func(*args, **kwds) finally: self.elapsed = getattr(time,'monotonic',time.time)() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocess.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def _test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=60): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=60): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocess.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=5) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. # We use alarm() to interrupt it if it blocks for too long. 
def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) @unittest.skipIf(True, 'bad pipe in pypy3') def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() travis = os.environ.get('COVERAGE') #XXX: travis-ci N = (1 if travis else 5) if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: p.start() for p in procs: join_process(p) for p in procs: self.assertEqual(p.exitcode, 0) 
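        # second batch: start long-running sleepers, terminate() them shortly
        # after starting, and check that each exit code reflects the signal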
procs = [self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.0002) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() for i in range(3): gc.collect() self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(test.support.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) @classmethod def _sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def 
check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, 'r') as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("__init__.py", err) self.assertIn("1/0 # MARKER", err) @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, 1) with open(testfn, 'r') as f: content = f.read() self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) 
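        # non-string reasons are used directly as the exit status, so sys.exit
        # is the target itself and no traceback file needs to be checked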
for reason in (True, False, 8): p = self.Process(target=sys.exit, args=(reason,)) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, reason) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) 
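# A short, self-contained sketch (illustrative; `demo_queue_timeouts` is not a
# test helper) of the blocking behaviour timed above: multiprocess queues
# honour the same block/timeout arguments as the stdlib queue module and raise
# queue.Full / queue.Empty.
import queue
import multiprocess as multiprocessing

def demo_queue_timeouts():
    q = multiprocessing.Queue(maxsize=1)
    q.put('x')                        # fills the only slot
    try:
        q.put('y', True, 0.1)         # blocks ~0.1s, then raises queue.Full
    except queue.Full:
        pass
    assert q.get(timeout=1.0) == 'x'  # blocking get with a timeout
    try:
        q.get_nowait()                # queue is now empty -> queue.Empty
    except queue.Empty:
        pass
    q.close()
    q.join_thread()

if __name__ == '__main__':
    demo_queue_timeouts()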
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with test.support.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w') as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with test.support.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = getattr(time,'monotonic',time.time)() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = getattr(time,'monotonic',time.time)() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once # failed because the delta was only 135.8 ms. self.assertGreaterEqual(delta, 0.100) close_queue(q) def test_queue_feeder_donot_stop_onexc(self): # bpo-30414: verify feeder handles exceptions correctly if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): def __reduce__(self): raise AttributeError with test.support.captured_stderr(): q = self.Queue() q.put(NotSerializable()) q.put(True) self.assertTrue(q.get(timeout=TIMEOUT)) close_queue(q) with test.support.captured_stderr(): # bpo-33078: verify that the queue size is correctly handled # on errors. 
q = self.Queue(maxsize=1) q.put(NotSerializable()) q.put(True) try: self.assertEqual(q.qsize(), 1) except NotImplementedError: # qsize is not available on all platforms as it # relies on sem_getvalue pass # bpo-30595: use a timeout of 1 second for slow buildbots self.assertTrue(q.get(timeout=1.0)) # Check that the size of the queue is correct self.assertTrue(q.empty()) close_queue(q) def test_queue_feeder_on_queue_feeder_error(self): # bpo-30006: verify feeder handles exceptions using the # _on_queue_feeder_error hook. if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): """Mock unserializable object""" def __init__(self): self.reduce_was_called = False self.on_queue_feeder_error_was_called = False def __reduce__(self): self.reduce_was_called = True raise AttributeError class SafeQueue(multiprocessing.queues.Queue): """Queue with overloaded _on_queue_feeder_error hook""" @staticmethod def _on_queue_feeder_error(e, obj): if (isinstance(e, AttributeError) and isinstance(obj, NotSerializable)): obj.on_queue_feeder_error_was_called = True not_serializable_obj = NotSerializable() # The captured_stderr reduces the noise in the test report with test.support.captured_stderr(): q = SafeQueue(ctx=multiprocessing.get_context()) q.put(not_serializable_obj) # Verify that q is still functioning correctly q.put(True) self.assertTrue(q.get(timeout=1.0)) # Assert that the serialization and the hook have been called correctly self.assertTrue(not_serializable_obj.reduce_was_called) self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) def test_closed_queue_put_get_exceptions(self): for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): q.close() with self.assertRaisesRegex(ValueError, 'is closed'): q.put('foo') with self.assertRaisesRegex(ValueError, 'is closed'): q.get() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) #
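# A hedged, standalone sketch of the counter behaviour probed with get_value()
# above (the function name is illustrative). Semaphore.get_value() is not
# implemented on every platform (e.g. macOS), which is why the tests go through
# assertReturnsIfImplemented, and why over-releasing a BoundedSemaphore only
# raises ValueError where the value can be tracked.
import multiprocess as multiprocessing

def demo_semaphores():
    sem = multiprocessing.Semaphore(2)
    assert sem.acquire() is True        # counter 2 -> 1
    assert sem.acquire(False) is True   # non-blocking acquire, counter 1 -> 0
    assert sem.acquire(False) is False  # counter exhausted, returns immediately
    sem.release()                       # counter back to 1
    bounded = multiprocessing.BoundedSemaphore(1)
    bounded.acquire()
    bounded.release()
    # bounded.release()  # a second release would exceed the initial value and
    #                    # raise ValueError on platforms that track the counter

if __name__ == '__main__':
    demo_semaphores()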
self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def assertReachesEventually(self, func, value): for i in range(10): try: if func() == value: break except NotImplementedError: break time.sleep(DELTA) time.sleep(DELTA) self.assertReturnsIfImplemented(value, func) def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() 
cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.1 dt = getattr(time,'monotonic',time.time)() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = getattr(time,'monotonic',time.time)() - dt # borrow logic in assertTimeout() from test/lock_tests.py if not result and expected * 0.6 < dt < expected * 10.0: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
for i in range(3): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertTrue(success.value) @classmethod def _test_wait_result(cls, c, pid): with c: c.notify() time.sleep(1) if pid is not None: os.kill(pid, signal.SIGINT) def test_wait_result(self): if isinstance(self, ProcessesMixin) and sys.platform != 'win32': pid = os.getpid() else: pid = None c = self.Condition() with c: self.assertFalse(c.wait(0)) self.assertFalse(c.wait(0.1)) p = self.Process(target=self._test_wait_result, args=(c, pid)) p.start() self.assertTrue(c.wait(60)) if pid is not None: self.assertRaises(KeyboardInterrupt, c.wait, 60) p.join() class _TestEvent(BaseTestCase): @classmethod def _test_event(cls, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporarily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) p = self.Process(target=self._test_event, args=(event,)) p.daemon = True p.start() self.assertEqual(wait(), True) p.join() # # Tests for Barrier - adapted from tests in test/lock_tests.py # # Many of the tests for threading.Barrier use a list as an atomic # counter: a value is appended to increment the counter, and the # length of the list gives the value. We use the class DummyList # for the same purpose. class _DummyList(object): def __init__(self): wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) lock = multiprocessing.Lock() self.__setstate__((wrapper, lock)) self._lengthbuf[0] = 0 def __setstate__(self, state): (self._wrapper, self._lock) = state self._lengthbuf = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._wrapper, self._lock) def append(self, _): with self._lock: self._lengthbuf[0] += 1 def __len__(self): with self._lock: return self._lengthbuf[0] def _wait(): # A crude wait/yield function not relying on synchronization primitives. time.sleep(0.01) class Bunch(object): """ A bunch of threads. """ def __init__(self, namespace, f, args, n, wait_before_exit=False): """ Construct a bunch of `n` threads running the same function `f`. If `wait_before_exit` is True, the threads won't terminate until do_finish() is called. 
""" self.f = f self.args = args self.n = n self.started = namespace.DummyList() self.finished = namespace.DummyList() self._can_exit = namespace.Event() if not wait_before_exit: self._can_exit.set() threads = [] for i in range(n): p = namespace.Process(target=self.task) p.daemon = True p.start() threads.append(p) def finalize(threads): for p in threads: p.join() self._finalizer = weakref.finalize(self, finalize, threads) def task(self): pid = os.getpid() self.started.append(pid) try: self.f(*self.args) finally: self.finished.append(pid) self._can_exit.wait(30) assert self._can_exit.is_set() def wait_for_started(self): while len(self.started) < self.n: _wait() def wait_for_finished(self): while len(self.finished) < self.n: _wait() def do_finish(self): self._can_exit.set() def close(self): self._finalizer() class AppendTrue(object): def __init__(self, obj): self.obj = obj def __call__(self): self.obj.append(True) class _TestBarrier(BaseTestCase): """ Tests for Barrier objects. """ N = 5 defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout def setUp(self): self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) def tearDown(self): self.barrier.abort() self.barrier = None def DummyList(self): if self.TYPE == 'threads': return [] elif self.TYPE == 'manager': return self.manager.list() else: return _DummyList() def run_threads(self, f, args): b = Bunch(self, f, args, self.N-1) try: f(*args) b.wait_for_finished() finally: b.close() @classmethod def multipass(cls, barrier, results, n): m = barrier.parties assert m == cls.N for i in range(n): results[0].append(True) assert len(results[1]) == i * m barrier.wait() results[1].append(True) assert len(results[0]) == (i + 1) * m barrier.wait() try: assert barrier.n_waiting == 0 except NotImplementedError: pass assert not barrier.broken def test_barrier(self, passes=1): """ Test that a barrier is passed in lockstep """ results = [self.DummyList(), self.DummyList()] self.run_threads(self.multipass, (self.barrier, results, passes)) def test_barrier_10(self): """ Test that a barrier works for 10 consecutive runs """ return self.test_barrier(10) @classmethod def _test_wait_return_f(cls, barrier, queue): res = barrier.wait() queue.put(res) def test_wait_return(self): """ test the return value from barrier.wait """ queue = self.Queue() self.run_threads(self._test_wait_return_f, (self.barrier, queue)) results = [queue.get() for i in range(self.N)] self.assertEqual(results.count(0), 1) close_queue(queue) @classmethod def _test_action_f(cls, barrier, results): barrier.wait() if len(results) != 1: raise RuntimeError def test_action(self): """ Test the 'action' callback """ results = self.DummyList() barrier = self.Barrier(self.N, action=AppendTrue(results)) self.run_threads(self._test_action_f, (barrier, results)) self.assertEqual(len(results), 1) @classmethod def _test_abort_f(cls, barrier, results1, results2): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() def test_abort(self): """ Test that an abort will put the barrier in a broken state """ results1 = self.DummyList() results2 = self.DummyList() self.run_threads(self._test_abort_f, (self.barrier, results1, results2)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertTrue(self.barrier.broken) @classmethod def _test_reset_f(cls, barrier, results1, results2, results3): i = barrier.wait() if i == 
cls.N//2: # Wait until the other threads are all in the barrier. while barrier.n_waiting < cls.N-1: time.sleep(0.001) barrier.reset() else: try: barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) # Now, pass the barrier again barrier.wait() results3.append(True) def test_reset(self): """ Test that a 'reset' on a barrier frees the waiting threads """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() self.run_threads(self._test_reset_f, (self.barrier, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_abort_and_reset_f(cls, barrier, barrier2, results1, results2, results3): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() # Synchronize and reset the barrier. Must synchronize first so # that everyone has left it when we reset, and after so that no # one enters it before the reset. if barrier2.wait() == cls.N//2: barrier.reset() barrier2.wait() barrier.wait() results3.append(True) def test_abort_and_reset(self): """ Test that a barrier can be reset after being broken. """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() barrier2 = self.Barrier(self.N) self.run_threads(self._test_abort_and_reset_f, (self.barrier, barrier2, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_timeout_f(cls, barrier, results): i = barrier.wait() if i == cls.N//2: # One thread is late! time.sleep(1.0) try: barrier.wait(0.5) except threading.BrokenBarrierError: results.append(True) @unittest.skipIf(True, 'bad timeout in pypy3') def test_timeout(self): """ Test wait(timeout) """ results = self.DummyList() self.run_threads(self._test_timeout_f, (self.barrier, results)) self.assertEqual(len(results), self.barrier.parties) @classmethod def _test_default_timeout_f(cls, barrier, results): i = barrier.wait(cls.defaultTimeout) if i == cls.N//2: # One thread is later than the default timeout time.sleep(1.0) try: barrier.wait() except threading.BrokenBarrierError: results.append(True) @unittest.skipIf(True, 'bad timeout in pypy3') def test_default_timeout(self): """ Test the barrier's default timeout """ barrier = self.Barrier(self.N, timeout=0.5) results = self.DummyList() self.run_threads(self._test_default_timeout_f, (barrier, results)) self.assertEqual(len(results), barrier.parties) def test_single_thread(self): b = self.Barrier(1) b.wait() b.wait() @classmethod def _test_thousand_f(cls, barrier, passes, conn, lock): for i in range(passes): barrier.wait() with lock: conn.send(i) def test_thousand(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) passes = 1000 lock = self.Lock() conn, child_conn = self.Pipe(False) for j in range(self.N): p = self.Process(target=self._test_thousand_f, args=(self.barrier, passes, child_conn, lock)) p.start() self.addCleanup(p.join) for i in range(passes): for j in range(self.N): self.assertEqual(conn.recv(), i) # # # class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('q', 2 ** 33, 2 ** 34), ('c', latin('x'), latin('y')) ] def setUp(self): if not HAS_SHAREDCTYPES: 
self.skipTest("requires multiprocess.sharedctypes") @classmethod def _test(cls, values): for sv, cv in zip(values, cls.codes_values): sv.value = cv[2] def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.daemon = True proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) def test_rawvalue(self): self.test_value(raw=True) def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() obj1 = val1.get_obj() val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() obj2 = val2.get_obj() lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() obj3 = val3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def f(cls, seq): for i in range(1, len(seq)): seq[i] += seq[i-1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.daemon = True p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_array_from_size(self): size = 10 # Test for zeroing (see issue #11675). # The repetition below strengthens the test by increasing the chances # of previously allocated non-zero memory being used for the new array # on the 2nd and 3rd loops. 
for _ in range(3): arr = self.Array('i', size) self.assertEqual(len(arr), size) self.assertEqual(list(arr), [0] * size) arr[:] = range(10) self.assertEqual(list(arr), list(range(10))) del arr @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', list(range(10))) lock1 = arr1.get_lock() obj1 = arr1.get_obj() arr2 = self.Array('i', list(range(10)), lock=None) lock2 = arr2.get_lock() obj2 = arr2.get_obj() lock = self.Lock() arr3 = self.Array('i', list(range(10)), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) # # # class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(list(range(10))) self.assertEqual(a[:], list(range(10))) b = self.list() self.assertEqual(b[:], []) b.extend(list(range(5))) self.assertEqual(b[:], list(range(5))) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2,3,4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], list(range(10))) d = [a, b] e = self.list(d) self.assertEqual( [element[:] for element in e], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) def test_list_iter(self): a = self.list(list(range(10))) it = iter(a) self.assertEqual(list(it), list(range(10))) self.assertEqual(list(it), []) # exhausted # list modified during iteration it = iter(a) a[0] = 100 self.assertEqual(next(it), 100) def test_list_proxy_in_list(self): a = self.list([self.list(range(3)) for _i in range(3)]) self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) a[0][-1] = 55 self.assertEqual(a[0][:], [0, 1, 55]) for i in range(1, 3): self.assertEqual(a[i][:], [0, 1, 2]) self.assertEqual(a[1].pop(), 2) self.assertEqual(len(a[1]), 2) for i in range(0, 3, 2): self.assertEqual(len(a[i]), 3) del a b = self.list() b.append(b) del b def test_dict(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) def test_dict_iter(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) it = iter(d) self.assertEqual(list(it), indices) self.assertEqual(list(it), []) # exhausted # dictionary changed size during iteration it = iter(d) d.clear() self.assertRaises(RuntimeError, next, it) def test_dict_proxy_nested(self): pets = self.dict(ferrets=2, hamsters=4) supplies = self.dict(water=10, feed=3) d = self.dict(pets=pets, supplies=supplies) self.assertEqual(supplies['water'], 10) self.assertEqual(d['supplies']['water'], 10) d['supplies']['blankets'] = 5 self.assertEqual(supplies['blankets'], 5) self.assertEqual(d['supplies']['blankets'], 5) d['supplies']['water'] = 7 self.assertEqual(supplies['water'], 7) 
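# A brief illustrative sketch (standalone; not part of _TestContainers) of the
# proxy semantics the surrounding assertions verify: values handed out by a
# Manager are proxies, so a mutation made through a nested proxy is visible
# from every other reference to the same shared object.
import multiprocess as multiprocessing

def demo_nested_proxies():
    with multiprocessing.Manager() as manager:
        pets = manager.dict(ferrets=2)
        d = manager.dict(pets=pets)      # stores a proxy, not a snapshot
        d['pets']['hamsters'] = 4        # mutate through the nested proxy
        assert pets['hamsters'] == 4     # visible via the original proxy too
        l = manager.list([pets])
        l[0]['marmots'] = 1
        assert pets['marmots'] == 1

if __name__ == '__main__':
    demo_nested_proxies()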
self.assertEqual(d['supplies']['water'], 7) del pets del supplies self.assertEqual(d['pets']['ferrets'], 2) d['supplies']['blankets'] = 11 self.assertEqual(d['supplies']['blankets'], 11) pets = d['pets'] supplies = d['supplies'] supplies['water'] = 7 self.assertEqual(supplies['water'], 7) self.assertEqual(d['supplies']['water'], 7) d.clear() self.assertEqual(len(d), 0) self.assertEqual(supplies['water'], 7) self.assertEqual(pets['hamsters'], 4) l = self.list([pets, supplies]) l[0]['marmots'] = 1 self.assertEqual(pets['marmots'], 1) self.assertEqual(l[0]['marmots'], 1) del pets del supplies self.assertEqual(l[0]['marmots'], 1) outer = self.list([[88, 99], l]) self.assertIsInstance(outer[0], list) # Not a ListProxy self.assertEqual(outer[-1][-1]['feed'], 3) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) # # # def sqr(x, wait=0.0): time.sleep(wait) return x*x def mul(x, y): return x*y def raise_large_valuerror(wait): time.sleep(wait) raise ValueError("x" * 1024**2) def identity(x): return x class CountedObject(object): n_instances = 0 def __new__(cls): cls.n_instances += 1 return object.__new__(cls) def __del__(self): type(self).n_instances -= 1 class SayWhenError(ValueError): pass def exception_throwing_generator(total, when): if when == -1: raise SayWhenError("Somebody said when") for i in range(total): if i == when: raise SayWhenError("Somebody said when") yield i class _TestPool(BaseTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.pool = cls.Pool(4) @classmethod def tearDownClass(cls): cls.pool.terminate() cls.pool.join() cls.pool = None super().tearDownClass() def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), list(map(sqr, list(range(100))))) def test_starmap(self): psmap = self.pool.starmap tuples = list(zip(range(10), range(9,-1, -1))) self.assertEqual(psmap(mul, tuples), list(itertools.starmap(mul, tuples))) tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(psmap(mul, tuples, chunksize=20), list(itertools.starmap(mul, tuples))) def test_starmap_async(self): tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(self.pool.starmap_async(mul, tuples).get(), list(itertools.starmap(mul, tuples))) def test_map_async(self): self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), list(map(sqr, list(range(10))))) def test_map_async_callbacks(self): call_args = self.manager.list() if self.TYPE == 'manager' else [] self.pool.map_async(int, ['1'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(1, len(call_args)) self.assertEqual([1], call_args[0]) self.pool.map_async(int, ['a'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(2, len(call_args)) self.assertIsInstance(call_args[1], ValueError) def test_map_unplicklable(self): # Issue #19425 -- failure to pickle should not cause a hang if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class A(object): def __reduce__(self): raise RuntimeError('cannot pickle') with 
self.assertRaises(RuntimeError): self.pool.map(sqr, [A()]*10) def test_map_chunksize(self): try: self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) except multiprocessing.TimeoutError: self.fail("pool.map_async with chunksize stalled on null list") def test_map_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) # again, make sure it's reentrant with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(10, 3), 1) class SpecialIterable: def __iter__(self): return self def __next__(self): raise SayWhenError def __len__(self): return 1 with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) get = TimingWrapper(res.get) self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, list(range(10))) self.assertEqual(list(it), list(map(sqr, list(range(10))))) it = self.pool.imap(sqr, list(range(10))) for i in range(10): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) it = self.pool.imap(sqr, list(range(1000)), chunksize=100) for i in range(1000): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) def test_imap_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) for i in range(3): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) # SayWhenError seen at start of problematic chunk's results it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) for i in range(6): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) for i in range(4): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, list(range(10))) self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) def test_imap_unordered_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap_unordered(sqr, 
exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap_unordered(sqr, exception_throwing_generator(10, 3), 1) expected_values = list(map(sqr, list(range(10)))) with self.assertRaises(SayWhenError): # imap_unordered makes it difficult to anticipate the SayWhenError for i in range(10): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) it = self.pool.imap_unordered(sqr, exception_throwing_generator(20, 7), 2) expected_values = list(map(sqr, list(range(20)))) with self.assertRaises(SayWhenError): for i in range(20): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) def test_make_pool(self): expected_error = (RemoteError if self.TYPE == 'manager' else ValueError) self.assertRaises(expected_error, self.Pool, -1) self.assertRaises(expected_error, self.Pool, 0) if self.TYPE != 'manager': p = self.Pool(3) try: self.assertEqual(3, len(p._pool)) finally: p.close() p.join() def test_terminate(self): result = self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() # Sanity check the pool didn't wait for all tasks to finish self.assertLess(join.elapsed, 2.0) def test_empty_iterable(self): # See Issue 12157 p = self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) p.close() p.join() def test_context(self): if self.TYPE == 'processes': L = list(range(10)) expected = [sqr(i) for i in L] with self.Pool(2) as p: r = p.map_async(sqr, L) self.assertEqual(r.get(), expected) p.join() self.assertRaises(ValueError, p.map_async, sqr, L) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_traceback(self): # We want to ensure that the traceback from the child process is # contained in the traceback raised in the main process.
if self.TYPE == 'processes': with self.Pool(1) as p: try: p.apply(self._test_traceback) except Exception as e: exc = e else: self.fail('expected RuntimeError') p.join() self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with test.support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) # _helper_reraises_exception should not make the error # a remote exception with self.Pool(1) as p: try: p.map(sqr, exception_throwing_generator(1, -1), 1) except Exception as e: exc = e else: self.fail('expected SayWhenError') self.assertIs(type(exc), SayWhenError) self.assertIs(exc.__cause__, None) p.join() @classmethod def _test_wrapped_exception(cls): raise RuntimeError('foo') @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_wrapped_exception(self): # Issue #20980: Should not wrap exception when using thread pool with self.Pool(1) as p: with self.assertRaises(RuntimeError): p.apply(self._test_wrapped_exception) p.join() def test_map_no_failfast(self): # Issue #23992: the fail-fast behaviour when an exception is raised # during map() would make Pool.join() deadlock, because a worker # process would fill the result queue (after the result handler thread # terminated, hence not draining it anymore). t_start = getattr(time,'monotonic',time.time)() with self.assertRaises(ValueError): with self.Pool(2) as p: try: p.map(raise_large_valuerror, [0, 1]) finally: time.sleep(0.5) p.close() p.join() # check that we indeed waited for all jobs self.assertGreater(getattr(time,'monotonic',time.time)() - t_start, 0.9) def test_release_task_refs(self): # Issue #29861: task arguments and results should not be kept # alive after we are done with them. objs = [CountedObject() for i in range(10)] refs = [weakref.ref(o) for o in objs] self.pool.map(identity, objs) del objs for i in range(3): gc.collect() time.sleep(DELTA) # let threaded cleanup code run self.assertEqual(set(wr() for wr in refs), {None}) # With a process pool, copies of the objects are returned, check # they were released too. 
self.assertEqual(CountedObject.n_instances, 0) def test_enter(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) with pool: pass # call pool.terminate() # pool is no longer running with self.assertRaises(ValueError): # bpo-35477: pool.__enter__() fails if the pool is not running with pool: pass pool.join() def test_resource_warning(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) pool.terminate() pool.join() # force state to RUN to emit ResourceWarning in __del__() pool._state = multiprocessing.pool.RUN with support.check_warnings(('unclosed running multiprocessing pool', ResourceWarning)): pool = None support.gc_collect() def raising(): raise KeyError("key") def unpickleable_result(): return lambda: 42 class _TestPoolWorkerErrors(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_async_error_callback(self): p = multiprocessing.Pool(2) scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(raising, error_callback=errback) self.assertRaises(KeyError, res.get) self.assertTrue(scratchpad[0]) self.assertIsInstance(scratchpad[0], KeyError) p.close() p.join() def _test_unpickleable_result(self): from multiprocess.pool import MaybeEncodingError p = multiprocessing.Pool(2) # Make sure we don't lose pool processes because of encoding errors. for iteration in range(20): scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(unpickleable_result, error_callback=errback) self.assertRaises(MaybeEncodingError, res.get) wrapped = scratchpad[0] self.assertTrue(wrapped) self.assertIsInstance(scratchpad[0], MaybeEncodingError) self.assertIsNotNone(wrapped.exc) self.assertIsNotNone(wrapped.value) p.close() p.join() class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): p = multiprocessing.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive # (countdown * DELTA = 5 seconds max startup process time) countdown = 50 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [w.pid for w in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() def test_pool_worker_lifetime_early_close(self): # Issue #10332: closing a pool whose workers have limited lifetimes # before all the tasks completed would make join() hang. 
p = multiprocessing.Pool(3, maxtasksperchild=1) results = [] for i in range(6): results.append(p.apply_async(sqr, (i, 0.3))) p.close() p.join() # check the results for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): # tests cases against bpo-38744 and bpo-39360 cmd = '''if 1: from multiprocess import Pool problem = None class A: def __init__(self): self.pool = Pool(processes=1) def test(): global problem problem = A() problem.pool.map(float, tuple(range(10))) if __name__ == "__main__": test() ''' rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) self.assertEqual(rc, 0) # # Test of creating a customized manager class # from multiprocess.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in range(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('__next__',) def __iter__(self): return self def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) @unittest.skipIf(True, 'bad exitcode in pypy3') def test_mymanager(self): manager = MyManager() manager.start() self.common(manager) manager.shutdown() # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) @unittest.skipIf(True, 'bad exitcode in pypy3') def test_mymanager_context(self): with MyManager() as manager: self.common(manager) # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. 
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) @unittest.skipIf(True, 'bad exitcode in pypy3') def test_mymanager_context_prestarted(self): manager = MyManager() manager.start() with manager: self.common(manager) self.assertEqual(manager._process.exitcode, 0) def common(self, manager): foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = pyqueue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) values = ['hello world', None, True, 2.25, 'hall\xe5 v\xe4rlden', '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', b'hall\xe5 v\xe4rlden', ] result = values[:] @classmethod def _putter(cls, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple queue.put(tuple(cls.values)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER ) manager.start() self.addCleanup(manager.shutdown) p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() self.assertEqual(queue.get(), self.result) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. # Changed on PyPy: passing functions to xmlrpc is broken #self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER) try: srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. 
srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() p.join() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue finally: if hasattr(manager, "shutdown"): manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) try: manager.start() self.addCleanup(manager.shutdown) except OSError as e: if e.errno != errno.EADDRINUSE: raise # Retry after some time, in case the old socket was lingering # (sporadic failure on buildbots) time.sleep(1.0) manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) if hasattr(manager, "shutdown"): self.addCleanup(manager.shutdown) # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', list(range(4))) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocessing.BufferTooShort as e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(-1), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) time.sleep(.1) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(OSError, reader.send, 2) self.assertRaises(OSError, writer.recv) self.assertRaises(OSError, writer.poll) def test_spawn_close(self): # We 
test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) reduction.send_handle(conn, fd, p.pid) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") 
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. 
conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((test.support.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() 
self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def _test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) @test.support.cpython_only def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). 
# Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def _new_shm_name(self, prefix): # Add a PID to the name of a POSIX shared memory object to allow # running multiprocessing tests (test_multiprocessing_fork, # test_multiprocessing_spawn, etc) in parallel. return prefix + str(os.getpid()) def test_shared_memory_basics(self): name_tsmb = self._new_shm_name('test01_tsmb') sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, name_tsmb) self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory(name_tsmb) self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. same_sms.close() if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. 
Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. name_dblunlink = self._new_shm_name('test01_dblunlink') sms_uno = shared_memory.SharedMemory( name_dblunlink, create=True, size=5000 ) with self.assertRaises(FileNotFoundError): try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory(name_dblunlink) sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( name_tsmb, create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. sms.close() # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocess tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. 
smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' #XXX: ensure correct resource_tracker rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. 
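# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only (comments, not executable test
# code).  It summarises the ShareableList behaviour exercised around here:
# items live in fixed-size slots inside one shared memory block, so they can
# be read and replaced in place, but a replacement may not need more bytes
# than its slot provides.
#
#   from multiprocess import shared_memory
#
#   sl = shared_memory.ShareableList(['howdy', b'HoWdY', -273.154, 100, None])
#   same = shared_memory.ShareableList(name=sl.shm.name)   # attach by name
#   same[3] = 42                      # visible through both handles
#   assert sl[3] == 42
#   # sl[0] = 'far too many bytes'    # would raise ValueError (slot too small)
#   same.shm.close()
#   sl.shm.close()
#   sl.shm.unlink()                   # release the underlying segment
# ---------------------------------------------------------------------------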
self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. name_duplicate = self._new_shm_name('test03_duplicate') sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual(name_duplicate, sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl) deserialized_sl = pickle.loads(serialized_sl) self.assertTrue( isinstance(deserialized_sl, shared_memory.ShareableList) ) self.assertTrue(deserialized_sl[-1], 9) self.assertFalse(sl is deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") # Verify data is not being put into the pickled representation. name = 'a' * len(sl.shm.name) larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl) self.assertTrue(len(serialized_sl) == len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. 
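# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only (comments, not executable test
# code).  It shows the normal SharedMemory lifecycle; the termination test
# above checks what happens when a process is killed before it can run this
# cleanup: on posix the resource_tracker reclaims the segment and reports it
# as leaked, which is what the test asserts on the subprocess' stderr.
#
#   from multiprocess import shared_memory
#
#   shm = shared_memory.SharedMemory(create=True, size=16)
#   try:
#       shm.buf[:5] = b'hello'
#       # ... hand shm.name to other processes, which attach and close() ...
#   finally:
#       shm.close()     # every process closes its own handle
#       shm.unlink()    # only the owning process unlinks the segment
# ---------------------------------------------------------------------------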
p.terminate() p.wait() deadline = getattr(time,'monotonic',time.time)() + 60 t = 0.1 while getattr(time,'monotonic',time.time)() < deadline: time.sleep(t) t = min(t*2, 5) try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: raise AssertionError("A SharedMemory segment was leaked after" " a process was abruptly terminated.") if os.name == 'posix': # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): for i in range(3): gc.collect() self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a import gc; gc.collect() b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called import gc; gc.collect() c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) @test.support.cpython_only def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with test.support.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: 
sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' + m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: 
signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): pass finally: # Hack private attribute _handle to avoid printing an error # in conn.__del__ conn._handle = None self.assertRaises((ValueError, OSError), multiprocessing.connection.Connection, -1) class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection.CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') # # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() self.mgr.join() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() m.join() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocessing.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random()*0.1) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random()*0.1) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.create_server((test.support.HOST, 0)) addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) def test_wait_timeout(self): from multiprocess.connection import wait expected = 5 a, b = multiprocessing.Pipe() start = getattr(time,'monotonic',time.time)() res = wait([a, b], expected) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(res, []) self.assertLess(delta, expected * 2) self.assertGreater(delta, expected * 0.5) b.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, b], 20) delta = 
getattr(time,'monotonic',time.time)() - start self.assertEqual(res, [a]) self.assertLess(delta, 0.4) @classmethod def signal_and_sleep(cls, sem, period): sem.release() time.sleep(period) def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocessing.Semaphore(0) a, b = multiprocessing.Pipe() p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) self.assertTrue(sem.acquire(timeout=20)) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], expected + 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocessing.Pipe() t = getattr(time,'monotonic',time.time)() res = wait([a], timeout=-1) t = getattr(time,'monotonic',time.time)() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls): import json r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json # start child process using unusual flags prog = ('from multiprocess.tests import TestFlags; ' + 'TestFlags.run_in_child()') data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() 
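# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only (comments, not executable test
# code).  multiprocess.connection.wait() blocks until at least one of the
# given objects is ready (or the timeout expires) and returns the ready
# subset; Connection objects and Process.sentinel handles may be mixed in a
# single call, as the surrounding TestWait cases do.
#
#   import multiprocess as mp
#   from multiprocess.connection import wait
#
#   def child(conn):
#       conn.send('done'); conn.close()
#
#   r, w = mp.Pipe(duplex=False)
#   p = mp.Process(target=child, args=(w,))
#   p.start(); w.close()
#   ready = wait([r, p.sentinel], timeout=5)   # list of ready objects
#   if r in ready:
#       print(r.recv())
#   p.join(); r.close()
# ---------------------------------------------------------------------------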
self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. @classmethod def child(cls, n, conn): if n > 1: p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() join_process(p) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocessing.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocessing.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() join_process(p) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. 
fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocessing.Pipe() fd = self.get_high_socket_fd() try: p = multiprocessing.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() join_process(p) finally: self.close(fd) writer.close() reader.close() if multiprocessing.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, ctx.get_start_method()) def test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocessing.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def test_set_get(self): multiprocessing.set_forkserver_preload(PRELOAD) count 
= 0 old_method = multiprocessing.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocessing.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocessing.get_start_method(), method) ctx = multiprocessing.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocessing) count += 1 finally: multiprocessing.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocessing.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['spawn', 'fork'] or methods == ['fork', 'spawn', 'forkserver'] or methods == ['spawn', 'fork', 'forkserver']) def _test_preload_resources(self): if multiprocessing.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestResourceTracker(unittest.TestCase): def _test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os, tempfile import multiprocess as mp from multiprocess import resource_tracker from multiprocess.shared_memory import SharedMemory mp.set_start_method("spawn") rand = tempfile._RandomNameSequence() def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() deadline = getattr(time,'monotonic',time.time)() + 60 while getattr(time,'monotonic',time.time)() < deadline: time.sleep(.5) try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, but OSX seems to give # EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break else: raise AssertionError( f"A {rtype} resource was leaked after a process was " f"abruptly terminated.") err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) def check_resource_tracker_death(self, signum, should_die): # 
bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) os.waitpid(pid, 0) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: During pool creation, if we can't create a process, # don't leak already created ones. 
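# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only (comments, not executable test
# code).  SimpleQueue, exercised by TestSimpleQueue above, is a minimal
# pipe-backed queue: put(), get() and empty(), with no task tracking or
# timeouts.
#
#   import multiprocess as mp
#
#   def worker(q):
#       q.put('from child')
#
#   q = mp.SimpleQueue()
#   p = mp.Process(target=worker, args=(q,))
#   p.start()
#   print(q.get())        # blocks until the child has put an item
#   p.join()
# ---------------------------------------------------------------------------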
will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert o[1] == 6 """ manager_class = multiprocessing.managers.SyncManager def setUp(self): self.manager = self.manager_class() self.manager.start() self.proc = None def tearDown(self): if self.proc is not None and self.proc.is_alive(): self.proc.terminate() self.proc.join() self.manager.shutdown() self.manager = None self.proc = None @classmethod def setUpClass(cls): support.reap_children() tearDownClass = setUpClass def wait_proc_exit(self): # Only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395). 
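# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only (comments, not executable test
# code).  It spells out the pattern described in the TestSyncManagerTypes
# docstring: the parent creates a proxy through a SyncManager, the child
# mutates it, and the change is visible back in the parent.
#
#   import multiprocess as mp
#
#   def worker(shared):
#       assert shared[0] == 5
#       shared.append(6)          # mutate the managed list from the child
#
#   if __name__ == '__main__':
#       with mp.Manager() as manager:
#           shared = manager.list([5])
#           p = mp.Process(target=worker, args=(shared,))
#           p.start(); p.join()
#           assert list(shared) == [5, 6]
# ---------------------------------------------------------------------------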
join_process(self.proc) start_time = getattr(time,'monotonic',time.time)() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = getattr(time,'monotonic',time.time)() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) self.run_worker(self._test_pool, o) @classmethod def _test_queue(cls, obj): assert obj.qsize() == 2 assert obj.full() assert not obj.empty() assert obj.get() == 5 assert not obj.empty() assert obj.get() == 6 assert obj.empty() def test_queue(self, qname="Queue"): o = getattr(self.manager, qname)(2) o.put(5) o.put(6) self.run_worker(self._test_queue, o) assert o.empty() assert not o.full() def test_joinable_queue(self): self.test_queue("JoinableQueue") @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.count(5) == 1 assert obj.index(5) == 0 obj.sort() obj.reverse() for x in obj: pass assert len(obj) == 1 assert obj.pop(0) == 5 def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_dict(cls, obj): assert len(obj) == 1 assert obj['foo'] == 5 assert obj.get('foo') == 5 assert list(obj.items()) == [('foo', 5)] assert list(obj.keys()) == ['foo'] assert list(obj.values()) == [5] assert obj.copy() == {'foo': 5} assert obj.popitem() == ('foo', 5) def test_dict(self): o = self.manager.dict() o['foo'] = 5 self.run_worker(self._test_dict, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_value(cls, obj): assert obj.value == 1 assert obj.get() == 1 obj.set(2) def test_value(self): o = self.manager.Value('i', 1) self.run_worker(self._test_value, o) self.assertEqual(o.value, 2) self.assertEqual(o.get(), 2) @classmethod def _test_array(cls, obj): assert obj[0] == 0 assert obj[1] == 1 assert 
len(obj) == 2 assert list(obj) == [0, 1] def test_array(self): o = self.manager.Array('i', [0, 1]) self.run_worker(self._test_array, o) @classmethod def _test_namespace(cls, obj): assert obj.x == 0 assert obj.y == 1 def test_namespace(self): o = self.manager.Namespace() o.x = 0 o.y = 1 self.run_worker(self._test_namespace, o) class MiscTestCase(unittest.TestCase): def test__all__(self): # Just make sure names in blacklist are excluded support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, blacklist=['SUBDEBUG', 'SUBWARNING', 'license', 'citation']) # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocessing.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) if processes: test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocessing.Process connection = multiprocessing.connection current_process = staticmethod(multiprocessing.current_process) parent_process = staticmethod(multiprocessing.parent_process) active_children = staticmethod(multiprocessing.active_children) Pool = staticmethod(multiprocessing.Pool) Pipe = staticmethod(multiprocessing.Pipe) Queue = staticmethod(multiprocessing.Queue) JoinableQueue = staticmethod(multiprocessing.JoinableQueue) Lock = staticmethod(multiprocessing.Lock) RLock = staticmethod(multiprocessing.RLock) Semaphore = staticmethod(multiprocessing.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) Condition = staticmethod(multiprocessing.Condition) Event = staticmethod(multiprocessing.Event) Barrier = staticmethod(multiprocessing.Barrier) Value = staticmethod(multiprocessing.Value) Array = staticmethod(multiprocessing.Array) RawValue = staticmethod(multiprocessing.RawValue) RawArray = staticmethod(multiprocessing.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocessing.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocessing.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this 
can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) start_time = getattr(time,'monotonic',time.time)() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = getattr(time,'monotonic',time.time)() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. test.support.environment_altered = True support.print_warning('Shared objects which still exist ' 'at manager shutdown:') support.print_warning(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocessing.dummy.Process connection = multiprocessing.dummy.connection current_process = staticmethod(multiprocessing.dummy.current_process) active_children = staticmethod(multiprocessing.dummy.active_children) Pool = staticmethod(multiprocessing.dummy.Pool) Pipe = staticmethod(multiprocessing.dummy.Pipe) Queue = staticmethod(multiprocessing.dummy.Queue) JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) Lock = staticmethod(multiprocessing.dummy.Lock) RLock = staticmethod(multiprocessing.dummy.RLock) Semaphore = staticmethod(multiprocessing.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) Condition = staticmethod(multiprocessing.dummy.Condition) Event = staticmethod(multiprocessing.dummy.Event) Barrier = staticmethod(multiprocessing.dummy.Barrier) Value = staticmethod(multiprocessing.dummy.Value) Array = staticmethod(multiprocessing.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = __module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocessing.set_forkserver_preload(PRELOAD) multiprocessing.process._cleanup() dangling[0] = multiprocessing.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " "see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates 
temp directory multiprocessing.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = False # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() multiprocessing.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocessing.process._dangling) - set(dangling[0]) if processes: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocessing.util._cleanup_tests() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/tests/__main__.py000066400000000000000000000016201455552142400265360ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') tests = glob.glob(suite + os.path.sep + '__init__.py') + \ [i for i in tests if 'main' not in i] if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/tests/mp_fork_bomb.py000066400000000000000000000007001455552142400274500ustar00rootroot00000000000000import multiprocessing, sys def foo(): print("123") # Because "if __name__ == '__main__'" is missing this will not work # correctly on Windows. However, we should get a RuntimeError rather # than the Windows equivalent of a fork bomb. 
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) else: multiprocessing.set_start_method('spawn') p = multiprocessing.Process(target=foo) p.start() p.join() sys.exit(p.exitcode) uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/tests/mp_preload.py000066400000000000000000000005551455552142400271460ustar00rootroot00000000000000import multiprocessing multiprocessing.Lock() def f(): print("ok") if __name__ == "__main__": ctx = multiprocessing.get_context("forkserver") modname = "multiprocess.tests.mp_preload" # Make sure it's importable __import__(modname) ctx.set_forkserver_preload([modname]) proc = ctx.Process(target=f) proc.start() proc.join() uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/tests/test_multiprocessing_fork.py000066400000000000000000000007341455552142400323320ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("fork is not available on Windows") if sys.platform == 'darwin': raise unittest.SkipTest("test may crash on macOS (bpo-33725)") install_tests_in_module_dict(globals(), 'fork') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/tests/test_multiprocessing_forkserver.py000066400000000000000000000006071455552142400335600ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("forkserver is not available on Windows") install_tests_in_module_dict(globals(), 'forkserver') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/tests/test_multiprocessing_main_handling.py000066400000000000000000000271601455552142400341630ustar00rootroot00000000000000# tests __main__ module handling in multiprocessing from test import support # Skip tests if _multiprocessing wasn't built. support.import_module('_multiprocessing') import importlib import importlib.machinery import unittest import sys import os import os.path import py_compile from test.support.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, assert_python_ok) if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") # Look up which start methods are available to test import multiprocess as multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) # Issue #22332: Skip tests if sem_open implementation is broken. support.import_module('multiprocess.synchronize') verbose = support.verbose test_source = """\ # multiprocessing includes all sorts of shenanigans to make __main__ # attributes accessible in the subprocess in a pickle compatible way. 
# We run the "doesn't work in the interactive interpreter" example from # the docs to make sure it *does* work from an executed __main__, # regardless of the invocation mechanism import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method # We use this __main__ defined function in the map call below in order to # check that multiprocessing in correctly running the unguarded # code in child processes and then making it available as __main__ def f(x): return x*x # Check explicit relative imports if "check_sibling" in __file__: # We're inside a package and not in a __main__.py file # so make sure explicit relative imports work correctly from . import sibling if __name__ == '__main__': start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(f, [1, 2, 3], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) test_source_main_skipped_in_children = """\ # __main__.py files have an implied "if __name__ == '__main__'" so # multiprocessing should always skip running them in child processes # This means we can't use __main__ defined functions in child processes, # so we just use "int" as a passthrough operation below if __name__ != "__main__": raise RuntimeError("Should only be called as __main__!") import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(int, [1, 4, 9], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) # These helpers were copied from test_cmd_line_script & tweaked a bit... def _make_test_script(script_dir, script_basename, source=test_source, omit_suffix=False): to_return = make_script(script_dir, script_basename, source, omit_suffix) # Hack to check explicit relative imports if script_basename == "check_sibling": make_script(script_dir, "sibling", "") importlib.invalidate_caches() return to_return def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source=test_source, depth=1): to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth) importlib.invalidate_caches() return to_return # There's no easy way to pass the script directory in to get # -m to work (avoiding that is the whole point of making # directories and zipfiles executable!) 
# So we fake it for testing purposes with a custom launch script launch_source = """\ import sys, os.path, runpy sys.path.insert(0, %s) runpy._run_module_as_main(%r) """ def _make_launch_script(script_dir, script_basename, module_name, path=None): if path is None: path = "os.path.dirname(__file__)" else: path = repr(path) source = launch_source % (path, module_name) to_return = make_script(script_dir, script_basename, source) importlib.invalidate_caches() return to_return class MultiProcessingCmdLineMixin(): maxDiff = None # Show full tracebacks on subprocess failure def setUp(self): if self.start_method not in AVAILABLE_START_METHODS: self.skipTest("%r start method not available" % self.start_method) def _check_output(self, script_name, exit_code, out, err): if verbose > 1: print("Output from test script %r:" % script_name) print(repr(out)) self.assertEqual(exit_code, 0) self.assertEqual(err.decode('utf-8'), '') expected_results = "%s -> [1, 4, 9]" % self.start_method self.assertEqual(out.decode('utf-8').strip(), expected_results) def _check_script(self, script_name, *cmd_line_switches): if not __debug__: cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) run_args = cmd_line_switches + (script_name, self.start_method) rc, out, err = assert_python_ok(*run_args, __isolated=False) self._check_output(script_name, rc, out, err) def test_basic_script(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') self._check_script(script_name) def test_basic_script_no_suffix(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script', omit_suffix=True) self._check_script(script_name) def test_ipython_workaround(self): # Some versions of the IPython launch script are missing the # __name__ = "__main__" guard, and multiprocessing has long had # a workaround for that case # See https://github.com/ipython/ipython/issues/4698 source = test_source_main_skipped_in_children with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'ipython', source=source) self._check_script(script_name) script_no_suffix = _make_test_script(script_dir, 'ipython', source=source, omit_suffix=True) self._check_script(script_no_suffix) def test_script_compiled(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) self._check_script(pyc_file) def test_directory(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) self._check_script(script_dir) def test_directory_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) self._check_script(script_dir) def test_zipfile(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) self._check_script(zip_name) def test_zipfile_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) compiled_name = 
py_compile.compile(script_name, doraise=True) zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) self._check_script(zip_name) def test_module_in_package(self): with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, 'check_sibling') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.check_sibling') self._check_script(launch_name) def test_module_in_package_in_zipfile(self): with support.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) self._check_script(launch_name) def test_module_in_subpackage_in_zipfile(self): with support.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) self._check_script(launch_name) def test_package(self): source = self.main_in_children_source with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) def test_package_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) compiled_name = py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) # Test all supported start methods (setupClass skips as appropriate) class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'spawn' main_in_children_source = test_source_main_skipped_in_children class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'fork' main_in_children_source = test_source class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'forkserver' main_in_children_source = test_source_main_skipped_in_children def tearDownModule(): support.reap_children() if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/tests/test_multiprocessing_spawn.py000066400000000000000000000004241455552142400325150ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") install_tests_in_module_dict(globals(), 'spawn') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.8/multiprocess/util.py000066400000000000000000000331621455552142400246370ustar00rootroot00000000000000# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import itertools import sys import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocess' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocess ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # Abstract socket support def _platform_supports_abstract_sockets(): if sys.platform == "linux": return True if hasattr(sys, 'getandroidapilevel'): return True return False def is_abstract_socket_namespace(address): if not address: return False if isinstance(address, bytes): return address[0] == 0 elif isinstance(address, str): return address[0] == "\0" raise TypeError('address type of {address!r} unrecognized') abstract_sockets_supported = _platform_supports_abstract_sockets() # # Function returning a temp directory which will be removed on exit # def _remove_temp_dir(rmtree, tempdir): rmtree(tempdir) current_process = process.current_process() # current_process() can be None if the finalizer is called # late during Python finalization if current_process is not None: current_process._config['tempdir'] = None def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up tempdir = process.current_process()._config.get('tempdir') if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) # keep a strong reference to shutil.rmtree(), since the finalizer # can be called late during Python shutdown Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), exitpriority=-100) process.current_process()._config['tempdir'] = tempdir return tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} 
_finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): if (exitpriority is not None) and not isinstance(exitpriority,int): raise TypeError( "Exitpriority ({0!r}) must be None or int, not {1!s}".format( exitpriority, type(exitpriority))) if obj is not None: self._weakref = weakref.ref(obj, self) elif exitpriority is None: raise ValueError("Without object, exitpriority cannot be None") self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<%s object, dead>' % self.__class__.__name__ x = '<%s object, callback=%s' % ( self.__class__.__name__, getattr(self._callback, '__name__', self._callback)) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitpriority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0] is not None else: f = lambda p : p[0] is not None and p[0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). # list(_finalizer_registry) should be atomic, while # list(_finalizer_registry.items()) is not. 
keys = [key for key in list(_finalizer_registry) if f(key)] keys.sort(reverse=True) for key in keys: finalizer = _finalizer_registry.get(key) # key may have been removed from the registry if finalizer is not None: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=process.active_children, current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._reset() register_after_fork(self, ForkAwareThreadLock._reset) def _reset(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () # # Close fds except those specified # try: MAXFD = os.sysconf("SC_OPEN_MAX") except Exception: MAXFD = 256 def close_all_fds_except(fds): fds = list(fds) + [-1, MAXFD] fds.sort() assert fds[-1] == MAXFD, 'fd too large' for i in range(len(fds) - 1): os.closerange(fds[i]+1, fds[i+1]) # # Close sys.stdin and replace stdin with os.devnull # def _close_stdin(): if sys.stdin is None: return try: sys.stdin.close() except (OSError, ValueError): pass try: fd = os.open(os.devnull, os.O_RDONLY) try: sys.stdin = open(fd, closefd=False) except: os.close(fd) raise except (OSError, ValueError): pass # # Flush standard streams, if any # def _flush_std_streams(): try: sys.stdout.flush() except (AttributeError, ValueError): pass try: sys.stderr.flush() except (AttributeError, ValueError): pass # # Start a program with only specified fds kept open # def spawnv_passfds(path, args, passfds): import _posixsubprocess passfds = tuple(sorted(map(int, passfds))) errpipe_read, errpipe_write = os.pipe() try: return _posixsubprocess.fork_exec( args, [os.fsencode(path)], 
True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, None) finally: os.close(errpipe_read) os.close(errpipe_write) def close_fds(*fds): """Close each file descriptor given as an argument""" for fd in fds: os.close(fd) def _cleanup_tests(): """Cleanup multiprocessing resources when multiprocessing tests completed.""" from test import support # cleanup multiprocessing process._cleanup() # Stop the ForkServer process if it's running from multiprocess import forkserver forkserver._forkserver._stop() # Stop the ResourceTracker process if it's running from multiprocess import resource_tracker resource_tracker._resource_tracker._stop() # bpo-37421: Explicitly call _run_finalizers() to remove immediately # temporary directories created by multiprocessing.util.get_temp_dir(). _run_finalizers() support.gc_collect() support.reap_children() uqfoundation-multiprocess-b3457a5/pypy3.9/000077500000000000000000000000001455552142400205535ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/README_MODS000066400000000000000000000175201455552142400222620ustar00rootroot00000000000000cp -rf pypy3.8/examples . cp -rf pypy3.8/doc . cp -f pypy3.8/index.html . cp -rf pypy3.8/module . cp -rf pypy3.9-v7.3.8-src/lib-python/3/multiprocessing multiprocess cp -rf pypy3.9-v7.3.8-src/lib-python/3/test/test_multiprocessing_*py multiprocess/tests cp -rf pypy3.9-v7.3.8-src/lib-python/3/test/_test_multiprocessing.py multiprocess/tests/__init__.py cp -rf pypy3.9-v7.3.8-src/lib-python/3/test/mp_*py multiprocess/tests/ cp -rf pypy3.8/multiprocess/tests/__main__.py multiprocess/tests/ cp -rf pypy3.8/_multiprocess . # ---------------------------------------------------------------------- EDIT multiprocess/__init__: __version__ EDIT time.monotonic --> getattr(time,'monotonic',time.time) EDIT "spawn" --> "fork" for darwin context EDIT multiprocess: multiprocessing --> multiprocess EDIT multiprocess: pickle --> dill ADDED *args, **kwds for ForkingPickler in __init__ and dump EDIT multiprocess/dummy: multiprocessing --> multiprocess EDIT multiprocess/tests: multiprocessing --> multiprocess # ---------------------------------------------------------------------- diff py3.9/multiprocess/forkserver.py pypy3.9/multiprocess/forkserver.py 240a241 > diff py3.9/multiprocess/managers.py pypy3.9/multiprocess/managers.py 1154c1154 < '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', --- > '__setitem__', 'clear', 'copy', 'get', 'items', diff py3.9/multiprocess/shared_memory.py pypy3.9/multiprocess/shared_memory.py 320,322d319 < _recreation_codes = [ < self._extract_recreation_code(item) for item in sequence < ] 428c425 < offset = self._offset_data_start + self._allocated_offsets[position] --- > offset = self._offset_data_start + self._allocated_offsets[position] diff py3.9/multiprocess/synchronize.py pypy3.9/multiprocess/synchronize.py 32c32 < except ImportError: --- > except (ImportError): # ---------------------------------------------------------------------- diff py3.9/multiprocess/tests/__init__.py pypy3.9/multiprocess/tests/__init__.py 568a569 > @unittest.skipIf(True, 'bad pipe in pypy3') 612c613,614 < gc.collect() # For PyPy or other GCs. --- > for i in range(3): > gc.collect() 1940a1943 > @unittest.skipIf(True, 'bad timeout in pypy3') 1959a1963 > @unittest.skipIf(True, 'bad timeout in pypy3') 2671c2673,2674 < gc.collect() # For PyPy or other GCs. 
--- > for i in range(3): > gc.collect() 2759a2763,2765 > sm = multiprocessing.get_start_method() > if sm == 'fork' and sys.implementation.name == 'pypy': > self.skipTest("race condition on PyPy") 2968c2974,2975 < self.assertRaises(Exception, queue.put, time.sleep) --- > # Changed on PyPy: passing functions to xmlrpc is broken > #self.assertRaises(Exception, queue.put, time.sleep) 3608c3615 < def test_heap(self): --- > def _test_heap(self): 3665a3673,3674 > support.gc_collect() # for PyPy and other GCs > 3671a3681 > @test.support.cpython_only 3778c3788 < # running multiprocessing tests (test_multiprocessing_fork, --- > # running multiprocess tests (test_multiprocessing_fork, 4191c4200,4201 < gc.collect() # For PyPy or other GCs. --- > for i in range(3): > gc.collect() 4245a4256 > @test.support.cpython_only 5101c5112 < def test_preload_resources(self): --- > def _test_preload_resources(self): # ---------------------------------------------------------------------- $ diff pypy3.9-v7.3.8-src/lib-python/3/test/_test_multiprocessing.py pypy3.9-v7.3.9-src/lib-python/3/test/_test_multiprocessing.py 71a72,77 > if support.check_sanitizer(address=True): > # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to > # work around a libasan race condition: dead lock in pthread_create(). > raise unittest.SkipTest("libasan has a pthread_create() dead lock") > # ---------------------------------------------------------------------- diff pypy3.9-v7.3.9-src/pypy/module/_multiprocessing/interp_memory.py pypy3.9-v7.3.10-src/pypy/module/_multiprocessing/interp_memory.py 10,11c10,11 < return space.newtuple([space.newint(address), < space.newint(mmap.mmap.size)]) --- > return space.newtuple2(space.newint(address), > space.newint(mmap.mmap.size)) # ---------------------------------------------------------------------- diff pypy3.9-v7.3.9-src/lib-python/3/multiprocessing/connection.py pypy3.9-v7.3.10-src/lib-python/3/multiprocessing/connection.py 76,80d75 < # Prefer abstract sockets if possible to avoid problems with the address < # size. When coding portable applications, some implementations have < # sun_path as short as 92 bytes in the sockaddr_un struct. 
< if util.abstract_sockets_supported: < return f"\0listener-{os.getpid()}-{next(_mmap_counter)}" diff pypy3.9-v7.3.9-src/lib-python/3/multiprocessing/managers.py pypy3.9-v7.3.10-src/lib-python/3/multiprocessing/managers.py 672c672 < process.join(timeout=0.1) --- > process.join(timeout=1.0) diff pypy3.9-v7.3.9-src/lib-python/3/multiprocessing/queues.py pypy3.9-v7.3.10-src/lib-python/3/multiprocessing/queues.py 142,148c142,145 < try: < self._reader.close() < finally: < close = self._close < if close: < self._close = None < close() --- > close = self._close > if close: > self._close = None > close() 172,173c169,171 < self._wlock, self._writer.close, self._ignore_epipe, < self._on_queue_feeder_error, self._sem), --- > self._wlock, self._reader.close, self._writer.close, > self._ignore_epipe, self._on_queue_feeder_error, > self._sem), 214,215c212,213 < def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe, < onerror, queue_sem): --- > def _feed(buffer, notempty, send_bytes, writelock, reader_close, > writer_close, ignore_epipe, onerror, queue_sem): 241c239,240 < close() --- > reader_close() > writer_close() diff pypy3.9-v7.3.9-src/lib-python/3/multiprocessing/util.py pypy3.9-v7.3.10-src/lib-python/3/multiprocessing/util.py 123c123 < raise TypeError('address type of {address!r} unrecognized') --- > raise TypeError(f'address type of {address!r} unrecognized') # ---------------------------------------------------------------------- diff pypy3.9-v7.3.9-src/pypy/module/_multiprocessing/test/test_interp_semaphore.py pypy3.9-v7.3.11-src/pypy/module/_multiprocessing/test/test_interp_semaphore.py 5d4 < from pypy.tool.pytest.objspace import gettestobjspace 15a15 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") diff pypy3.9-v7.3.9-src/pypy/module/_multiprocessing/test/test_semaphore.py pypy3.9-v7.3.11-src/pypy/module/_multiprocessing/test/test_semaphore.py 2a3 > import pytest 70a72 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 92a95 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 117a121 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 129a134 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 151a157 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") 167a174 > @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") # ---------------------------------------------------------------------- NOTE: semaphore_tracker throws KeyError in multiprocess and multiprocessing Traceback (most recent call last): File "multiprocess/resource_tracker.py", line 204, in main cache[rtype].remove(name) KeyError: '/mp-v2zejd7s' uqfoundation-multiprocess-b3457a5/pypy3.9/_multiprocess/000077500000000000000000000000001455552142400234435ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/_multiprocess/__init__.py000066400000000000000000000005011455552142400255500ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE from _multiprocessing import * uqfoundation-multiprocess-b3457a5/pypy3.9/doc/000077500000000000000000000000001455552142400213205ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/doc/CHANGES.html000066400000000000000000001133431455552142400232630ustar00rootroot00000000000000 Changelog for processing

Changelog for processing

Changes in 0.52

  • In versions 0.50 and 0.51, Lock.release() on Mac OSX would fail with OSError(errno.ENOSYS, "[Errno 78] Function not implemented"). This appears to be because sem_getvalue() has not been implemented on Mac OSX.

    Now sem_getvalue() is no longer needed. Unfortunately, however, on Mac OSX BoundedSemaphore() will not raise ValueError if it exceeds its initial value.

  • Some changes to the code for the reduction/rebuilding of connection and socket objects so that things work the same on Windows and Unix. This should fix a couple of bugs.

  • The code has been changed to consistently use "camelCase" for methods and (non-factory) functions. In the few cases where this has meant a change to the documented API, the old name has been retained as an alias.

Changes in 0.51

  • In 0.50 processing.Value() and processing.sharedctypes.Value() were related but had different signatures, which was rather confusing.

    Now processing.sharedctypes.Value() has been renamed processing.sharedctypes.RawValue() and processing.sharedctypes.Value() is the same as processing.Value().

  • In version 0.50 sendfd() and recvfd() apparently did not work on 64bit Linux. This has been fixed by reverting to using the CMSG_* macros as was done in 0.40.

    However, this means that systems without all the necessary CMSG_* macros (such as Solaris 8) will have to disable compilation of sendfd() and recvfd() by setting macros['HAVE_FD_TRANSFER'] = 0 in setup.py.

  • Fixed an authentication error when using a "remote" manager created using BaseManager.from_address().

  • Fixed a couple of bugs which only affected Python 2.4.

Changes in 0.50

  • ctypes is now a prerequisite if you want to use shared memory -- with Python 2.4 you will need to install it separately.

  • LocalManager() has been removed.

  • Added processing.Value() and processing.Array() which are similar to LocalManager.SharedValue() and LocalManager.SharedArray().
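
    As a rough illustration (not taken from the 0.50 release itself), the same idea written against the present-day multiprocess descendants of these calls, assuming the historical processing signatures behaved the same way:

    from multiprocess import Process, Value, Array

    def work(counter, data):
        counter.value += 1              # shared integer backed by shared memory
        for i in range(len(data)):
            data[i] *= 2                # shared array of doubles

    if __name__ == '__main__':
        counter = Value('i', 0)
        data = Array('d', [1.0, 2.0, 3.0])
        p = Process(target=work, args=(counter, data))
        p.start()
        p.join()
        print(counter.value, list(data))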

  • In the sharedctypes module new_value() and new_array() have been renamed Value() and Array().

  • Process.stop(), Process.getStoppable() and Process.setStoppable() have been removed. Use Process.terminate() instead.

  • processing.Lock now matches threading.Lock behaviour more closely: a thread can now release a lock it does not own, and when a thread tries to acquire a lock it already owns, a deadlock now results instead of an exception.

  • On Windows when the main thread is blocking on a method of Lock, RLock, Semaphore, BoundedSemaphore, Condition it will no longer ignore Ctrl-C. (The same was already true on Unix.)

    This differs from the behaviour of the equivalent objects in threading which will completely ignore Ctrl-C.

  • The test sub-package has been replaced by lots of unit tests in a tests sub-package. Some of the old test files have been moved over to a new examples sub-package.

  • On Windows it is now possible for a non-console python program (i.e. one using pythonw.exe instead of python.exe) to use processing.

    Previously an exception was raised when subprocess.py tried to duplicate stdin, stdout, stderr.

  • Proxy objects should now be thread safe -- they now use thread local storage.

  • Trying to transfer shared resources such as locks, queues etc between processes over a pipe or queue will now raise RuntimeError with a message saying that the object should only be shared between processes using inheritance.

    Previously, this worked unreliably on Windows but would fail with an unexplained AssertionError on Unix.

  • The names of some of the macros used for compiling the extension have changed. See INSTALL.txt and setup.py.

  • A few changes which (hopefully) make compilation possible on Solaris.

  • Lots of refactoring of the code.

  • Fixed reference leaks so that unit tests pass with "regrtest -R::" (at least on Linux).

Changes in 0.40

  • Removed SimpleQueue and PosixQueue types. Just use Queue instead.

  • Previously if you forgot to use the

    if __name__ == '__main__':
        freezeSupport()
        ...
    

    idiom on Windows then processes could be created recursively bringing the computer to its knees. Now RuntimeError will be raised instead.

  • Some refactoring of the code.

  • A Unix specific bug meant that a child process might fail to start a feeder thread for a queue if its parent process had already started its own feeder thread. Fixed.

Changes in 0.39

  • One can now create one-way pipes by doing reader, writer = Pipe(duplex=False).
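
    A minimal sketch of the one-way pipe, written with the call as it survives in the modern multiprocess package; duplex=False is assumed to hand back a receive-only end and a send-only end, as it does today:

    from multiprocess import Process, Pipe

    def produce(writer):
        writer.send('hello')            # this end can only send
        writer.close()

    if __name__ == '__main__':
        reader, writer = Pipe(duplex=False)
        p = Process(target=produce, args=(writer,))
        p.start()
        print(reader.recv())            # this end can only receive
        p.join()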

  • Rewrote code for managing shared memory maps.

  • Added a sharedctypes module for creating ctypes objects allocated from shared memory. On Python 2.4 this requires the installation of ctypes.

    ctypes objects are not protected by any locks so you will need to synchronize access to them (such as by using a lock). However they can be much faster to access than equivalent objects allocated using a LocalManager.
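
    A minimal sketch of that synchronization point, using the names the functions later acquired (RawArray in modern multiprocess.sharedctypes) and assuming a fork start method so the shared array is simply inherited by the child:

    from multiprocess import Process, Lock
    from multiprocess.sharedctypes import RawArray

    def scale(data, factor, lock):
        with lock:                      # raw ctypes objects carry no lock of their own
            for i in range(len(data)):
                data[i] *= factor

    if __name__ == '__main__':
        lock = Lock()
        data = RawArray('d', [1.0, 2.0, 3.0])
        p = Process(target=scale, args=(data, 2, lock))
        p.start()
        p.join()
        print(list(data))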

  • Rearranged documentation.

  • Previously the C extension caused a segfault on 64 bit machines with Python 2.5 because it used int instead of Py_ssize_t in certain places. This is now fixed. Thanks to Alexy Khrabrov for the report.

  • A fix for Pool.terminate().

  • A fix for cleanup behaviour of Queue.

Changes in 0.38

  • Have revamped the queue types. Now the queue types are Queue, SimpleQueue and (on systems which support it) PosixQueue.

    Now Queue should behave just like Python's normal Queue.Queue class except that qsize(), task_done() and join() are not implemented. In particular, if no maximum size was specified when the queue was created then put() will always succeed without blocking.

    A SimpleQueue instance is really just a pipe protected by a couple of locks. It has get(), put() and empty() methods but does not support timeouts or non-blocking.

    BufferedPipeQueue() and PipeQueue() remain as deprecated aliases of Queue() but BufferedPosixQueue() has been removed. (Not sure if we really need to keep PosixQueue()...)

  • Previously the Pool.shutdown() method was a little dodgy -- it could block indefinitely if map() or imap*() were used and did not try to terminate workers while they were doing a task.

    Now there are three new methods close(), terminate() and join() -- shutdown() is retained as a deprecated alias of terminate(). Thanks to Gerald John M. Manipon for feature request/suggested patch to shutdown().

  • Pool.imap() and Pool.imap_unordered() have gained a chunksize argument which allows the iterable to be submitted to the pool in chunks. Choosing chunksize appropriately makes Pool.imap() almost as fast as Pool.map() even for long iterables and cheap functions.
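
    For example (spelled with the modern multiprocess API), a larger chunksize batches the iterable so that cheap tasks are not dispatched one at a time:

    from multiprocess import Pool

    def square(x):
        return x * x

    if __name__ == '__main__':
        pool = Pool(4)
        # each worker receives the iterable in batches of 100 items
        for result in pool.imap(square, range(10000), chunksize=100):
            pass
        pool.close()
        pool.join()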

  • Previously on Windows, when the cleanup code for a LocalManager attempted to unlink the name of the file which backs the shared memory map, an exception was raised if a child process still existed with a handle open for that mmap. This is likely to happen if a daemon process inherits a LocalManager instance.

    Now the parent process will remember the filename and attempt to unlink the file name again once all the child processes have been joined or terminated. Reported by Paul Rudin.

  • types.MethodType is registered with copy_reg so now instance methods and class methods should be picklable. (Unfortunately there is no obvious way of supporting the pickling of staticmethods since they are not marked with the class in which they were defined.)

    This means that on Windows it is now possible to use an instance method or class method as the target callable of a Process object.

  • On Windows reduction.fromfd() now returns true instances of _socket.socket, so there is no more need for the _processing.falsesocket type.

Changes in 0.37

  • Updated metadata and documentation because the project is now hosted at developer.berlios.de/projects/pyprocessing.
  • The Pool.join() method has been removed. Pool.shutdown() will now join the worker processes automatically.
  • A pool object no longer participates in a reference cycle so Pool.shutdown() should get called as soon as its reference count falls to zero.
  • On Windows if enableLogging() was used at module scope then the logger used by a child process would often get two copies of the same handler. To fix this, specifying a handler type in enableLogging() now causes any previous handlers used by the logger to be discarded.

Changes in 0.36

  • In recent versions on Unix the finalizers in a manager process were never given a chance to run before os._exit() was called, so old unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.

  • The shutting down of managers has been cleaned up.

  • In previous versions on Windows trying to acquire a lock owned by a different thread of the current process would raise an exception. Fixed.

  • In previous versions on Windows trying to use an event object for synchronization between two threads of the same process was likely to raise an exception. (This was caused by the bug described above.) Fixed.

  • Previously the arguments to processing.Semaphore() and processing.BoundedSemaphore() did not have any defaults. The defaults should be 1 to match threading. Fixed.

  • It should now be possible for a Windows Service created by using pywin32 to spawn processes using the processing package.

    Note that pywin32 apparently has a bug meaning that Py_Finalize() is never called when the service exits so functions registered with atexit never get a chance to run. Therefore it is advisable to explicitly call sys.exitfunc() or atexit._run_exitfuncs() at the end of ServiceFramework.DoSvcRun(). Otherwise child processes are liable to survive the service when it is stopped. Thanks to Charlie Hull for the report.

  • Added getLogger() and enableLogging() to support logging.
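
    The historical enableLogging() corresponds roughly to log_to_stderr() in the modern multiprocess package; a minimal sketch using the modern names, on the assumption that the old calls behaved equivalently:

    import logging
    from multiprocess import Process, get_logger, log_to_stderr

    def work():
        get_logger().info('child is running')

    if __name__ == '__main__':
        log_to_stderr(logging.INFO)     # stands in for the old enableLogging(level)
        p = Process(target=work)
        p.start()
        p.join()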

Changes in 0.35

  • By default processes are no longer stoppable using the stop() method: one must call setStoppable(True) before start() in order to use the stop() method. (Note that terminate() will work regardless of whether the process is marked as being "stoppable".)

    The reason for this is that on Windows getting stop() to work involves starting a new console for the child process and installing a signal handler for the SIGBREAK signal. This unfortunately means that Ctrl-Break cannot be used to kill all processes of the program.

  • Added setStoppable() and getStoppable() methods -- see above.

  • Added BufferedQueue/BufferedPipeQueue/BufferedPosixQueue. Putting an object on a buffered queue will always succeed without blocking (just like with Queue.Queue if no maximum size is specified). This makes them potentially safer than the normal queue types provided by processing which have finite capacity and may cause deadlocks if they fill.

    test/test_worker.py has been updated to use BufferedQueue for the task queue instead of explicitly spawning a thread to feed tasks to the queue without risking a deadlock.

  • Now when the NO_SEM_TIMED macro is set polling will be used to get around the lack of sem_timedwait(). This means that Condition.wait() and Queue.get() should now work with timeouts on Mac OS X.

  • Added a callback argument to Pool.apply_async().
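
    A short example of the callback (the call survives unchanged in the modern multiprocess package); the callback runs in the parent process as each result arrives:

    from multiprocess import Pool

    def cube(x):
        return x ** 3

    if __name__ == '__main__':
        results = []
        pool = Pool(2)
        for i in range(5):
            pool.apply_async(cube, (i,), callback=results.append)
        pool.close()
        pool.join()
        print(sorted(results))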

  • Added test/test_httpserverpool.py which runs a pool of http servers which share a single listening socket.

  • Previously on Windows the process object was passed to the child process on the commandline (after pickling and hex encoding it). This caused errors when the pickled string was too large. Now if the pickled string is large then it will be passed to the child over a pipe or socket.

  • Fixed bug in the iterator returned by Pool.imap().

  • Fixed bug in Condition.__repr__().

  • Fixed a handle/file descriptor leak when sockets or connections are unpickled.

Changes in 0.34

  • Although in version 0.33 the C extension would compile on Mac OSX, trying to import it failed with "undefined symbol: _sem_timedwait". Unfortunately the ImportError exception was silently swallowed.

    This is now fixed by using the NO_SEM_TIMED macro. Unfortunately this means that some methods like Condition.wait() and Queue.get() will not work with timeouts on Mac OS X. If you really need to be able to use timeouts then you can always use the equivalent objects created with a manager. Thanks to Doug Hellmann for report and testing.

  • Added a terminate() method to process objects which is more forceful than stop().

  • Fixed bug in the cleanup function registered with atexit which on Windows could cause a process which is shutting down to deadlock waiting for a manager to exit. Thanks to Dominique Wahli for report and testing.

  • Added test/test_workers.py which gives an example of how to create a collection of worker processes which execute tasks from one queue and return results on another.

  • Added processing.Pool() which returns a process pool object. This allows one to execute functions asynchronously. It also has a parallel implementation of the map() builtin. This is still experimental and undocumented --- see test/test_pool.py for example usage.

Changes in 0.33

  • Added a recvbytes_into() method for receiving byte data into objects with the writable buffer interface. Also renamed the _recv_string() and _send_string() methods of connection objects to recvbytes() and sendbytes().

  • Some optimizations for the transferring of large blocks of data using connection objects.

  • On Unix os.sysconf() is now used by default to determine whether to compile in support for posix semaphores or posix message queues.

    By using the NO_SEM_TIMED and NO_MQ_TIMED macros (see INSTALL.txt) it should now also be possible to compile in (partial) semaphore or queue support on Unix systems which lack the timeout functions sem_timedwait() or mq_timedreceive() and mq_timesend().

    gettimeofday() is now used instead of clock_gettime(), making compilation of the C extension (hopefully) possible on Mac OSX. No modification of setup.py should be necessary. Thanks to Michele Bertoldi for report and proposed patch.

  • cpuCount() function added which returns the number of CPUs in the system.

  • Bugfixes to PosixQueue class.

Changes in 0.32

  • Refactored and simplified _nonforking module -- info about sys.modules of parent process is no longer passed on to child process. Also pkgutil is no longer used.
  • Allocated space from an mmap used by LocalManager will now be recycled.
  • Better tests for LocalManager.
  • Fixed bug in managers.py concerning refcounting of shared objects. Bug affects the case where the callable used to create a shared object does not return a unique object each time it is called. Thanks to Alexey Akimov for the report.
  • Added a freezeSupport() function. Calling this at the appropriate point in the main module is necessary when freezing a multiprocess program to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

Changes in 0.31

  • Fixed a one-line bug in localmanager.py which caused shared memory maps not to be resized properly.
  • Added tests for shared values/structs/arrays to test/test_processing.

Changes in 0.30

  • Process objects now support the complete API of thread objects.

    In particular isAlive(), isDaemon() and setDaemon() have been added, and join() now supports the timeout parameter.

    There are also new methods stop(), getPid() and getExitCode().

  • Implemented synchronization primitives based on the Windows mutexes and semaphores and posix named semaphores.

  • Added support for sharing simple objects between processes by using a shared memory map and the struct or array modules.

  • An activeChildren() function has been added to processing which returns a list of the child processes which are still alive.

  • A Pipe() function has been added which returns a pair of connection objects representing the ends of a duplex connection over which picklable objects can be sent.

  • socket objects etc are now picklable and can be transferred between processes. (Requires compilation of the _processing extension.)

  • Subclasses of managers.BaseManager no longer automatically spawn a child process when an instance is created: the start() method must be called explicitly.

  • On Windows child processes are now spawned using subprocess.

  • On Windows the Python 2.5 version of pkgutil is now used for loading modules by the _nonforking module. On Python 2.4 this version of pkgutil (which uses the standard Python licence) is included in processing.compat.

  • The arguments to the functions in processing.connection have changed slightly.

  • Connection objects now have a poll() method which tests whether there is any data available for reading.

  • The test/py2exedemo folder shows how to get py2exe to create a Windows executable from a program using the processing package.

  • More tests.

  • Bugfixes.

  • Rearrangement of various stuff.

Changes in 0.21

  • By default a proxy is now only able to access those methods of its referent which have been explicitly exposed.
  • The connection sub-package now supports digest authentication.
  • Process objects are now given randomly generated 'inheritable' authentication keys.
  • A manager process will now only accept connections from processes using the same authentication key.
  • Previously get_module() from _nonforking.py was seriously messed up (though it generally worked). It is a lot saner now.
  • Python 2.4 or higher is now required.

Changes in 0.20

  • The doc folder contains HTML documentation.
  • test is now a subpackage. Running processing.test.main() will run test scripts using both processes and threads.
  • nonforking.py has been renamed _nonforking.py. manager.py has been renamed managers.py. connection.py has become a sub-package, connection.
  • Listener and Client have been removed from processing, but still exist in processing.connection.
  • The package is now probably compatible with versions of Python earlier than 2.4.
  • set is no longer a type supported by the default manager type.
  • Many more changes.

Changes in 0.12

  • Fixed bug where the arguments to processing.Manager() were passed on to processing.manager.DefaultManager() in the wrong order.
  • processing.dummy is now a subpackage of processing instead of a module.
  • Rearranged package so that the test folder, README.txt and CHANGES.txt are copied when the package is installed.

Changes in 0.11

  • Fixed bug on Windows when the full path of nonforking.py contains a space.
  • On Unix there is no longer a need to make the arguments to the constructor of Process be picklable, or for an instance of a subclass of Process to be picklable, when you call the start() method.
  • On Unix, proxies which a child process inherits from its parent can be used by the child without any problem, so there is no longer a need to pass them as arguments to Process. (This will never be possible on Windows.)
uqfoundation-multiprocess-b3457a5/pypy3.9/doc/COPYING.html000066400000000000000000000040211455552142400233130ustar00rootroot00000000000000

Copyright (c) 2006-2008, R Oudkerk

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

uqfoundation-multiprocess-b3457a5/pypy3.9/doc/INSTALL.html000066400000000000000000000063531455552142400233230ustar00rootroot00000000000000 Installation of processing

Installation of processing

Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you should install the ctypes package (which comes automatically with Python 2.5).

Windows binary builds for Python 2.4 and Python 2.5 are available at

http://pyprocessing.berlios.de

or

http://pypi.python.org/pypi/processing

Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:

python setup.py install

It should not be necessary to do any editing of setup.py if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the macros dictionary or libraries list. The section to modify reads

else:
    macros = dict(
        HAVE_SEM_OPEN=1,
        HAVE_SEM_TIMEDWAIT=1,
        HAVE_FD_TRANSFER=1
        )
    libraries = ['rt']

More details can be found in the comments in setup.py.

Note that if you use HAVE_SEM_OPEN=0 then support for posix semaphores will not be compiled in, and many of the functions in the processing namespace, such as Lock() and Queue(), will not be available. However, one can still create a manager using manager = processing.Manager() and then do lock = manager.Lock() etc.

Running tests

To run the test scripts using Python 2.5 do

python -m processing.tests

and on Python 2.4 do

python -c "from processing.tests import main; main()"

This will run a number of test scripts using both processes and threads.

uqfoundation-multiprocess-b3457a5/pypy3.9/doc/THANKS.html000066400000000000000000000017751455552142400232100ustar00rootroot00000000000000 Thanks

Thanks

Thanks to everyone who has offered bug reports, patches, suggestions:

Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon, Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov, Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan, Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.

Sorry if I have forgotten anyone.

uqfoundation-multiprocess-b3457a5/pypy3.9/doc/__init__.py000066400000000000000000000004001455552142400234230ustar00rootroot00000000000000import os import webbrowser def main(): ''' Show html documentation using webbrowser ''' index_html = os.path.join(os.path.dirname(__file__), 'index.html') webbrowser.open(index_html) if __name__ == '__main__': main() uqfoundation-multiprocess-b3457a5/pypy3.9/doc/connection-objects.html000066400000000000000000000152041455552142400257760ustar00rootroot00000000000000 Connection objects

Connection objects

Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.

Connection objects are usually created using processing.Pipe() -- see also Listeners and Clients.

Connection objects have the following methods:

send(obj)

Send an object to the other end of the connection which should be read using recv().

The object must be picklable.

recv()
Return an object sent from the other end of the connection using send(). Raises EOFError if there is nothing left to receive and the other end was closed.
fileno()
Returns the file descriptor or handle used by the connection.
close()

Close the connection.

This is called automatically when the connection is garbage collected.

poll(timeout=0.0)

Return whether there is any data available to be read within timeout seconds.

If timeout is None then an infinite timeout is used.

Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C.

sendBytes(buffer)

Send byte data from an object supporting the buffer interface as a complete message.

Can be used to send strings or a view returned by buffer().

recvBytes()
Return a complete message of byte data sent from the other end of the connection as a string. Raises EOFError if there is nothing left to receive and the other end was closed.
recvBytesInto(buffer, offset=0)

Read into buffer at position offset a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises EOFError if there is nothing left to receive and the other end was closed.

buffer must be an object satisfying the writable buffer interface and offset must be non-negative and less than the length of buffer (in bytes).

If the buffer is too short then a BufferTooShort exception is raised and the complete message is available as e.args[0] where e is the exception instance.

For example:

>>> from processing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.sendBytes('thank you')
>>> a.recvBytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.sendBytes(arr1)
>>> count = b.recvBytesInto(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
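
If the supplied buffer is too small, the BufferTooShort exception described above still lets you recover the complete message. The following is a minimal sketch, assuming BufferTooShort is importable from processing.connection as described in Listeners and Clients; the undersized buffer is deliberate:

from array import array
from processing import Pipe
from processing.connection import BufferTooShort

a, b = Pipe()
a.sendBytes(array('i', range(10)))

small = array('i', [0] * 2)            # deliberately too small
try:
    b.recvBytesInto(small)
except BufferTooShort, e:
    data = e.args[0]                   # the complete message as a byte string
    print 'message needed %d bytes' % len(data)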

Warning

The recv() method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message.

Therefore, unless the connection object was produced using Pipe() you should only use the recv() and send() methods after performing some sort of authentication. See Authentication keys.

Warning

If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie.

uqfoundation-multiprocess-b3457a5/pypy3.9/doc/connection-objects.txt000066400000000000000000000072761455552142400256630ustar00rootroot00000000000000.. include:: header.txt ==================== Connection objects ==================== Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects usually created using `processing.Pipe()` -- see also `Listener and Clients `_. Connection objects have the following methods: `send(obj)` Send an object to the other end of the connection which should be read using `recv()`. The object must be picklable. `recv()` Return an object sent from the other end of the connection using `send()`. Raises `EOFError` if there is nothing left to receive and the other end was closed. `fileno()` Returns the file descriptor or handle used by the connection. `close()` Close the connection. This is called automatically when the connection is garbage collected. `poll(timeout=0.0)` Return whether there is any data available to be read within `timeout` seconds. If `timeout` is `None` then an infinite timeout is used. Unlike the other blocking methods on Windows this method can be interrupted by Ctrl-C. `sendBytes(buffer)` Send byte data from an object supporting the buffer interface as a complete message. Can be used to send strings or a view returned by `buffer()`. `recvBytes()` Return a complete message of byte data sent from the other end of the connection as a string. Raises `EOFError` if there is nothing left to receive and the other end was closed. `recvBytesInto(buffer, offset=0)` Read into `buffer` at position `offset` a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Raises `EOFError` if there is nothing left to receive and the other end was closed. `buffer` must be an object satisfying the writable buffer interface and `offset` must be non-negative and less than the length of `buffer` (in bytes). If the buffer is too short then a `BufferTooShort` exception is raised and the complete message is available as `e.args[0]` where `e` is the exception instance. For example: >>> from processing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.sendBytes('thank you') >>> a.recvBytes() 'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.sendBytes(arr1) >>> count = b.recvBytesInto(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The `recv()` method automatically unpickles the data it receives which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using `Pipe()` you should only use the `recv()` and `send()` methods after performing some sort of authentication. See `Authentication keys `_. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted because it may become impossible to be sure where the message boundaries lie. .. _Prev: queue-objects.html .. _Up: processing-ref.html .. _Next: manager-objects.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/connection-ref.html000066400000000000000000000357371455552142400251360ustar00rootroot00000000000000 Listeners and Clients

Listeners and Clients

Usually message passing between processes is done using queues or by using connection objects returned by Pipe().

However, the processing.connection module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for digest authentication using the hmac module from the standard library.

Classes and functions

The module defines the following functions:

Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)
Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections.
Client(address, family=None, authenticate=False, authkey=None)

Attempts to set up a connection to the listener which is using address address, returning a connection object.

The type of the connection is determined by family argument, but this can generally be omitted since it can usually be inferred from the format of address.

If authenticate is true or authkey is a string then digest authentication is used. The key used for authentication will be either authkey or currentProcess().getAuthKey() if authkey is None. If authentication fails then AuthenticationError is raised. See Authentication keys.

The module exports two exception types:

exception AuthenticationError
Exception raised when there is an authentication error.
exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Listener objects

Instances of Listener have the following methods:

__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)
address
The address to be used by the bound socket or named pipe of the listener object.
family

The type of the socket (or named pipe) to use.

This can be one of the strings 'AF_INET' (for a TCP socket), 'AF_UNIX' (for a Unix domain socket) or 'AF_PIPE' (for a Windows named pipe). Of these only the first is guaranteed to be available.

If family is None then the family is inferred from the format of address. If address is also None then a default is chosen. This default is the family which is assumed to be the fastest available. See Address formats.

Note that if family is 'AF_UNIX' then the associated file will only be readable/writable by the user running the current process -- use os.chmod() if you need to let other users access the socket.

backlog
If the listener object uses a socket then backlog is passed to the listen() method of the socket once it has been bound.
authenticate
If authenticate is true or authkey is not None then digest authentication is used.
authkey

If authkey is a string then it will be used as the authentication key; otherwise it must be None.

If authkey is None and authenticate is true then currentProcess.getAuthKey() is used as the authentication key.

If authkey is None and authenticate is false then no authentication is done.

If authentication fails then AuthenticationError is raised. See Authentication keys.

accept()

Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then AuthenticationError is raised.

Returns a connection object.

close()

Close the bound socket or named pipe of the listener object.

This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.

Listener objects have the following read-only properties:

address
The address which is being used by the listener object.
last_accepted

The address from which the last accepted connection came.

If this is unavailable then None is returned.

Address formats

  • An 'AF_INET' address is a tuple of the form (hostname, port) where hostname is a string and port is an integer

  • An 'AF_UNIX' address is a string representing a filename on the filesystem.

  • An 'AF_PIPE' address is a string of the form r'\\.\pipe\PipeName'.

    To use Client to connect to a named pipe on a remote computer called ServerName one should use an address of the form r'\\ServerName\pipe\PipeName' instead.

Note that any string beginning with two backslashes is assumed by default to be an 'AF_PIPE' address rather than an 'AF_UNIX' address.
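
As a quick illustration of the three formats, each of the following would be an acceptable address; the host, port, filename and pipe name are placeholders chosen for this sketch:

inet_address = ('localhost', 6000)            # 'AF_INET': (hostname, port)
unix_address = '/tmp/listener-example.sock'   # 'AF_UNIX': a filename on the filesystem
pipe_address = r'\\.\pipe\ListenerExample'    # 'AF_PIPE': a Windows named pipe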

Authentication keys

When one uses the recv() method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore Listener and Client use the hmac module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does not involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the return value of currentProcess().getAuthKey() is used (see Process objects). This value will automatically be inherited by any Process object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves.

Suitable authentication keys can also be generated by using os.urandom().

Example

The following server code creates a listener which uses 'secret password' as an authentication key. It then waits for a connection and sends some data to the client:

from processing.connection import Listener
from array import array

address = ('localhost', 6000)     # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')

conn = listener.accept()
print 'connection accepted from', listener.last_accepted

conn.send([2.25, None, 'junk', float])

conn.sendBytes('hello')

conn.sendBytes(array('i', [42, 1729]))

conn.close()
listener.close()

The following code connects to the server and receives some data from the server:

from processing.connection import Client
from array import array

address = ('localhost', 6000)
conn = Client(address, authkey='secret password')

print conn.recv()                 # => [2.25, None, 'junk', float]

print conn.recvBytes()            # => 'hello'

arr = array('i', [0, 0, 0, 0, 0])
print conn.recvBytesInto(arr)    # => 8
print arr                         # => array('i', [42, 1729, 0, 0, 0])

conn.close()
uqfoundation-multiprocess-b3457a5/pypy3.9/doc/connection-ref.txt000066400000000000000000000210001455552142400247630ustar00rootroot00000000000000.. include:: header.txt ======================= Listeners and Clients ======================= Usually message passing between processes is done using queues or by using connection objects returned by `Pipe()`. However, the `processing.connection` module allows some extra flexibility. It basically gives a high level API for dealing with sockets or Windows named pipes, and also has support for *digest authentication* using the `hmac` module from the standard library. Classes and functions ===================== The module defines the following functions: `Listener(address=None, family=None, backlog=1, authenticate=False, authkey=None)` Returns a wrapper for a bound socket or Windows named pipe which is 'listening' for connections. `Client(address, family=None, authenticate=False, authkey=None)` Attempts to set up a connection to the listener which is using address `address`, returning a `connection object `_. The type of the connection is determined by `family` argument, but this can generally be omitted since it can usually be inferred from the format of `address`. If `authentication` or `authkey` is a string then digest authentication is used. The key used for authentication will be either `authkey` or `currentProcess.getAuthKey()` if `authkey` is `None`. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. .. `deliverChallenge(connection, authkey)` Sends a randomly generated message to the other end of the connection and waits for a reply. If the reply matches the digest of the message using `authkey` as the key then a welcome message is sent to the other end of the connection. Otherwise `AuthenticationError` is raised. `answerChallenge(connection, authkey)` Receives a message, calculates the digest of the message using `authkey` as the key, and then sends the digest back. If a welcome message is not received then `AuthenticationError` is raised. The module exports two exception types: **exception** `AuthenticationError` Exception raised when there is an authentication error. **exception** `BufferTooShort` Exception raise by the `recvBytesInto()` method of a connection object when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Listener objects ================ Instances of `Listener` have the following methods: `__init__(address=None, family=None, backlog=1, authenticate=False, authkey=None)` `address` The address to be used by the bound socket or named pipe of the listener object. `family` The type of the socket (or named pipe) to use. This can be one of the strings `'AF_INET'` (for a TCP socket), `'AF_UNIX'` (for a Unix domain socket) or `'AF_PIPE'` (for a Windows named pipe). Of these only the first is guaranteed to be available. If `family` is `None` than the family is inferred from the format of `address`. If `address` is also `None` then a default is chosen. This default is the family which is assumed to be the fastest available. See `Address formats`_. Note that if `family` is `'AF_UNIX'` then the associated file will have only be readable/writable by the user running the current process -- use `os.chmod()` is you need to let other users access the socket. `backlog` If the listener object uses a socket then `backlog` is passed to the `listen()` method of the socket once it has been bound. 
`authenticate` If `authenticate` is true or `authkey` is not `None` then digest authentication is used. `authkey` If `authkey` is a string then it will be used as the authentication key; otherwise it must be `None`. If `authkey` is `None` and `authenticate` is true then `currentProcess.getAuthKey()` is used as the authentication key. If `authkey` is `None` and `authentication` is false then no authentication is done. If authentication fails then `AuthenticationError` is raised. See `Authentication keys`_. `accept()` Accept a connection on the bound socket or named pipe of the listener object. If authentication is attempted and fails then `AuthenticationError` is raised. Returns a `connection object ` object. `close()` Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: `address` The address which is being used by the listener object. `last_accepted` The address from which the last accepted connection came. If this is unavailable then `None` is returned. Address formats =============== * An `'AF_INET'` address is a tuple of the form `(hostname, port)` where `hostname` is a string and `port` is an integer * An `'AF_UNIX'` address is a string representing a filename on the filesystem. * An `'AF_PIPE'` address is a string of the form `r'\\\\.\\pipe\\PipeName'`. To use `Client` to connect to a named pipe on a remote computer called `ServerName` one should use an address of the form `r'\\\\ServerName\\pipe\\PipeName'` instead. Note that any string beginning with two backslashes is assumed by default to be an `'AF_PIPE'` address rather than an `'AF_UNIX'` address. Authentication keys =================== When one uses the `recv()` method of a connection object, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore `Listener` and `Client` use the `hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does *not* involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of `currentProcess().getAuthKey()` is used (see `Process objects `_). This value will automatically inherited by any `Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between the themselves. Suitable authentication keys can also be generated by using `os.urandom()`. Example ======= The following server code creates a listener which uses `'secret password'` as an authentication key. 
It then waits for a connection and sends some data to the client:: from processing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey='secret password') conn = listener.accept() print 'connection accepted from', listener.last_accepted conn.send([2.25, None, 'junk', float]) conn.sendBytes('hello') conn.sendBytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from processing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey='secret password') print conn.recv() # => [2.25, None, 'junk', float] print conn.recvBytes() # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print conn.recvBytesInto(arr) # => 8 print arr # => array('i', [42, 1729, 0, 0, 0]) conn.close() .. _Prev: sharedctypes.html .. _Up: processing-ref.html .. _Next: programming-guidelines.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/header.txt000066400000000000000000000003401455552142400233060ustar00rootroot00000000000000.. default-role:: literal .. header:: Prev_ |spaces| Up_ |spaces| Next_ .. footer:: Prev_ |spaces| Up_ |spaces| Next_ .. |nbsp| unicode:: U+000A0 .. |spaces| replace:: |nbsp| |nbsp| |nbsp| |nbsp| uqfoundation-multiprocess-b3457a5/pypy3.9/doc/html4css1.css000066400000000000000000000126361455552142400236640ustar00rootroot00000000000000/* :Author: David Goodger :Contact: goodger@users.sourceforge.net :Date: $Date: 2008/01/29 22:14:02 $ :Revision: $Revision: 1.1.1.1 $ :Copyright: This stylesheet has been placed in the public domain. Default cascading style sheet for the HTML output of Docutils. See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to customize this style sheet. */ /* used to remove borders from tables and images */ .borderless, table.borderless td, table.borderless th { border: 0 } table.borderless td, table.borderless th { /* Override padding for "table.docutils td" with "! important". The right padding separates the table cells. */ padding: 0 0.5em 0 0 ! important } .first { /* Override more specific margin styles with "! important". */ margin-top: 0 ! important } .last, .with-subtitle { margin-bottom: 0 ! important } .hidden { display: none } a.toc-backref { text-decoration: none ; color: black } blockquote.epigraph { margin: 2em 5em ; } dl.docutils dd { margin-bottom: 0.5em } /* Uncomment (and remove this text!) to get bold-faced definition list terms dl.docutils dt { font-weight: bold } */ div.abstract { margin: 2em 5em } div.abstract p.topic-title { font-weight: bold ; text-align: center } div.admonition, div.attention, div.caution, div.danger, div.error, div.hint, div.important, div.note, div.tip, div.warning { margin: 2em ; border: medium outset ; padding: 1em } div.admonition p.admonition-title, div.hint p.admonition-title, div.important p.admonition-title, div.note p.admonition-title, div.tip p.admonition-title { font-weight: bold ; font-family: sans-serif } div.attention p.admonition-title, div.caution p.admonition-title, div.danger p.admonition-title, div.error p.admonition-title, div.warning p.admonition-title { color: red ; font-weight: bold ; font-family: sans-serif } /* Uncomment (and remove this text!) to get reduced vertical space in compound paragraphs. 
div.compound .compound-first, div.compound .compound-middle { margin-bottom: 0.5em } div.compound .compound-last, div.compound .compound-middle { margin-top: 0.5em } */ div.dedication { margin: 2em 5em ; text-align: center ; font-style: italic } div.dedication p.topic-title { font-weight: bold ; font-style: normal } div.figure { margin-left: 2em ; margin-right: 2em } div.footer, div.header { clear: both; font-size: smaller } div.line-block { display: block ; margin-top: 1em ; margin-bottom: 1em } div.line-block div.line-block { margin-top: 0 ; margin-bottom: 0 ; margin-left: 1.5em } div.sidebar { margin-left: 1em ; border: medium outset ; padding: 1em ; background-color: #ffffee ; width: 40% ; float: right ; clear: right } div.sidebar p.rubric { font-family: sans-serif ; font-size: medium } div.system-messages { margin: 5em } div.system-messages h1 { color: red } div.system-message { border: medium outset ; padding: 1em } div.system-message p.system-message-title { color: red ; font-weight: bold } div.topic { margin: 2em } h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { margin-top: 0.4em } h1.title { text-align: center } h2.subtitle { text-align: center } hr.docutils { width: 75% } img.align-left { clear: left } img.align-right { clear: right } ol.simple, ul.simple { margin-bottom: 1em } ol.arabic { list-style: decimal } ol.loweralpha { list-style: lower-alpha } ol.upperalpha { list-style: upper-alpha } ol.lowerroman { list-style: lower-roman } ol.upperroman { list-style: upper-roman } p.attribution { text-align: right ; margin-left: 50% } p.caption { font-style: italic } p.credits { font-style: italic ; font-size: smaller } p.label { white-space: nowrap } p.rubric { font-weight: bold ; font-size: larger ; color: maroon ; text-align: center } p.sidebar-title { font-family: sans-serif ; font-weight: bold ; font-size: larger } p.sidebar-subtitle { font-family: sans-serif ; font-weight: bold } p.topic-title { font-weight: bold } pre.address { margin-bottom: 0 ; margin-top: 0 ; font-family: serif ; font-size: 100% } pre.literal-block, pre.doctest-block { margin-left: 2em ; margin-right: 2em ; background-color: #eeeeee } span.classifier { font-family: sans-serif ; font-style: oblique } span.classifier-delimiter { font-family: sans-serif ; font-weight: bold } span.interpreted { font-family: sans-serif } span.option { white-space: nowrap } span.pre { white-space: pre } span.problematic { color: red } span.section-subtitle { /* font-size relative to parent (h1..h6 element) */ font-size: 80% } table.citation { border-left: solid 1px gray; margin-left: 1px } table.docinfo { margin: 2em 4em } table.docutils { margin-top: 0.5em ; margin-bottom: 0.5em } table.footnote { border-left: solid 1px black; margin-left: 1px } table.docutils td, table.docutils th, table.docinfo td, table.docinfo th { padding-left: 0.5em ; padding-right: 0.5em ; vertical-align: top } table.docutils th.field-name, table.docinfo th.docinfo-name { font-weight: bold ; text-align: left ; white-space: nowrap ; padding-left: 0 } h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { font-size: 100% } /* tt.docutils { background-color: #eeeeee } */ ul.auto-toc { list-style-type: none } uqfoundation-multiprocess-b3457a5/pypy3.9/doc/index.html000066400000000000000000000064761455552142400233320ustar00rootroot00000000000000 Documentation for processing-0.52
uqfoundation-multiprocess-b3457a5/pypy3.9/doc/index.txt000066400000000000000000000021751455552142400231750ustar00rootroot00000000000000.. include:: header.txt .. include:: version.txt ======================================== Documentation for processing-|version| ======================================== :Author: R Oudkerk :Contact: roudkerk at users.berlios.de :Url: http://developer.berlios.de/projects/pyprocessing :Licence: BSD Licence Contents ======== * `Introduction `_ * `Package reference `_ + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes objects `_ + `Listeners and Clients `_ * `Programming guidelines `_ * `Tests and examples `_ See also ======== * `Installation instructions `_ * `Changelog `_ * `Acknowledgments `_ * `Licence `_ .. _Next: intro.html .. _Up: index.html .. _Prev: index.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/intro.html000066400000000000000000000427461455552142400233560ustar00rootroot00000000000000 Introduction

Introduction

Threads, processes and the GIL

To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads.

Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient.

On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other.

CPython has a Global Interpreter Lock (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C.

One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead.

Python gives little direct support for writing programs using multiple processes. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs.

Forking and spawning

There are two ways of creating a new process in Python:

  • The current process can fork a new child process by using the os.fork() function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits copies of all variables that the parent process had. (A minimal sketch of this idiom appears at the end of this section.)

    However, os.fork() is not available on every platform: in particular Windows does not support it.

  • Alternatively, the current process can spawn a completely new Python interpreter by using the subprocess module or one of the os.spawn*() functions.

    Getting this new interpreter into a fit state to perform the task set for it by its parent process is, however, a bit of a challenge.

The processing package uses os.fork() if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process.
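
A minimal sketch of the fork idiom described in the first bullet above (Unix only; os._exit() is used so the child does not run the parent's cleanup code):

import os

greeting = 'hello'          # the child gets its own copy of this variable

pid = os.fork()             # not available on Windows
if pid == 0:
    print 'child sees:', greeting
    os._exit(0)
else:
    os.waitpid(pid, 0)      # the parent waits for the child to finish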

The Process class

In the processing package processes are spawned by creating a Process object and then calling its start() method. processing.Process follows the API of threading.Thread. A trivial example of a multiprocess program is

from processing import Process

def f(name):
    print 'hello', name

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

Here the function f is run in a child process.

For an explanation of why (on Windows) the if __name__ == '__main__' part is necessary see Programming guidelines.

Exchanging objects between processes

processing supports two types of communication channel between processes:

Queues:

The function Queue() returns a near clone of Queue.Queue -- see the Python standard documentation. For example

from processing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()    # prints "[42, None, 'hello']"
    p.join()

Queues are thread and process safe. See Queues.

Pipes:

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example

from processing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print parent_conn.recv()   # prints "[42, None, 'hello']"
    p.join()

The two connection objects returned by Pipe() represent the two ends of the pipe. Each connection object has send() and recv() methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the same end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See Pipes.

Synchronization between processes

processing contains equivalents of all the synchronization primitives from threading. For instance one can use a lock to ensure that only one process prints to standard output at a time:

from processing import Process, Lock

def f(l, i):
    l.acquire()
    print 'hello world', i
    l.release()

if __name__ == '__main__':
    lock = Lock()

    for num in range(10):
        Process(target=f, args=(lock, num)).start()

Without using the lock output from the different processes is liable to get all mixed up.

Sharing state between processes

As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes.

However, if you really do need to use some shared data then processing provides a couple of ways of doing so.

Shared memory:

Data can be stored in a shared memory map using Value or Array. For example the following code

from processing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

will print

3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

The 'd' and 'i' arguments used when creating num and arr are typecodes of the kind used by the array module: 'd' indicates a double precision float and 'i' indicates a signed integer. These shared objects will be process and thread safe.

For more flexibility in using shared memory one can use the processing.sharedctypes module which supports the creation of arbitrary ctypes objects allocated from shared memory.
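
As a rough sketch of what that can look like -- assuming processing.sharedctypes exposes Value and Array constructors that accept a ctypes type plus constructor arguments, with the Point structure and its values invented purely for illustration:

from ctypes import Structure, c_double
from processing import Process
from processing.sharedctypes import Value, Array

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def shift(point, values):
    point.x += 1.0                       # modify the shared structure in place
    for i in range(len(values)):
        values[i] = -values[i]

if __name__ == '__main__':
    point = Value(Point, 1.0, 2.0)       # a ctypes object allocated from shared memory
    values = Array(c_double, range(5))   # a shared array of doubles

    p = Process(target=shift, args=(point, values))
    p.start()
    p.join()

    print point.x, point.y
    print values[:]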

Server process:

A manager object returned by Manager() controls a server process which holds python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue, Value and Array. For example:

from processing import Process, Manager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = Manager()

    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    print d
    print l

will print

{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Creating managers which support other types is not hard --- see Customized managers.

Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See Server process managers.

Using a pool of workers

The Pool() function returns an object representing a pool of worker processes. It has methods which allow tasks to be offloaded to the worker processes in a few different ways.

For example:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes
    result = pool.applyAsync(f, [10])     # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow
    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

See Process pools.

Speed

The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see benchmarks.py.

Number of 256 byte string objects passed between processes/threads per sec:

Connection type Windows Linux
Queue.Queue 49,000 17,000-50,000 [1]
processing.Queue 22,000 21,000
Queue managed by server 6,900 6,500
processing.Pipe 52,000 57,000
[1] For some reason the performance of Queue.Queue is very variable on Linux.

Number of acquires/releases of a lock per sec:

Lock type Windows Linux
threading.Lock 850,000 560,000
processing.Lock 420,000 510,000
Lock managed by server 10,000 8,400
threading.RLock 93,000 76,000
processing.RLock 420,000 500,000
RLock managed by server 8,800 7,400

Number of interleaved waits/notifies per sec on a condition variable by two processes:

Condition type Windows Linux
threading.Condition 27,000 31,000
processing.Condition 26,000 25,000
Condition managed by server 6,600 6,000

Number of integers retrieved from a sequence per sec:

Sequence type Windows Linux
list 6,400,000 5,100,000
unsynchronized shared array 3,900,000 3,100,000
synchronized shared array 200,000 220,000
list managed by server 20,000 17,000
uqfoundation-multiprocess-b3457a5/pypy3.9/doc/intro.txt000066400000000000000000000301551455552142400232200ustar00rootroot00000000000000.. include:: header.txt ============== Introduction ============== Threads, processes and the GIL ============================== To run more than one piece of code at the same time on the same computer one has the choice of either using multiple processes or multiple threads. Although a program can be made up of multiple processes, these processes are in effect completely independent of one another: different processes are not able to cooperate with one another unless one sets up some means of communication between them (such as by using sockets). If a lot of data must be transferred between processes then this can be inefficient. On the other hand, multiple threads within a single process are intimately connected: they share their data but often can interfere badly with one another. It is often argued that the only way to make multithreaded programming "easy" is to avoid relying on any shared state and for the threads to only communicate by passing messages to each other. CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading easier than it is in most languages by making sure that only one thread can manipulate the interpreter's objects at a time. As a result, it is often safe to let multiple threads access data without using any additional locking as one would need to in a language such as C. One downside of the GIL is that on multi-processor (or multi-core) systems a multithreaded Python program can only make use of one processor at a time. This is a problem that can be overcome by using multiple processes instead. Python gives little direct support for writing programs using multiple process. This package allows one to write multi-process programs using much the same API that one uses for writing threaded programs. Forking and spawning ==================== There are two ways of creating a new process in Python: * The current process can *fork* a new child process by using the `os.fork()` function. This effectively creates an identical copy of the current process which is now able to go off and perform some task set by the parent process. This means that the child process inherits *copies* of all variables that the parent process had. However, `os.fork()` is not available on every platform: in particular Windows does not support it. * Alternatively, the current process can spawn a completely new Python interpreter by using the `subprocess` module or one of the `os.spawn*()` functions. Getting this new interpreter in to a fit state to perform the task set for it by its parent process is, however, a bit of a challenge. The `processing` package uses `os.fork()` if it is available since it makes life a lot simpler. Forking the process is also more efficient in terms of memory usage and the time needed to create the new process. The Process class ================= In the `processing` package processes are spawned by creating a `Process` object and then calling its `start()` method. `processing.Process` follows the API of `threading.Thread`. A trivial example of a multiprocess program is :: from processing import Process def f(name): print 'hello', name if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() Here the function `f` is run in a child process. For an explanation of why (on Windows) the `if __name__ == '__main__'` part is necessary see `Programming guidelines `_. 
Exchanging objects between processes ==================================== `processing` supports two types of communication channel between processes: **Queues**: The function `Queue()` returns a near clone of `Queue.Queue` -- see the Python standard documentation. For example :: from processing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print q.get() # prints "[42, None, 'hello']" p.join() Queues are thread and process safe. See `Queues `_. **Pipes**: The `Pipe()` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example :: from processing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print parent_conn.recv() # prints "[42, None, 'hello']" p.join() The two connection objects returned by `Pipe()` represent the two ends of the pipe. Each connection object has `send()` and `recv()` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. See `Pipes `_. Synchronization between processes ================================= `processing` contains equivalents of all the synchronization primitives from `threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from processing import Process, Lock def f(l, i): l.acquire() print 'hello world', i l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes =============================== As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then `processing` provides a couple of ways of doing so. **Shared memory**: Data can be stored in a shared memory map using `Value` or `Array`. For example the following code :: from processing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print num.value print arr[:] will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The `'d'` and `'i'` arguments used when creating `num` and `arr` are typecodes of the kind used by the `array` module: `'d'` indicates a double precision float and `'i'` inidicates a signed integer. These shared objects will be process and thread safe. For more flexibility in using shared memory one can use the `processing.sharedctypes` module which supports the creation of arbitrary `ctypes objects allocated from shared memory `_. **Server process**: A manager object returned by `Manager()` controls a server process which holds python objects and allows other processes to manipulate them using proxies. A manager returned by `Manager()` will support types `list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value` and `Array`. 
For example:: from processing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print d print l will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Creating managers which support other types is not hard --- see `Customized managers `_. Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. See `Server process managers `_. Using a pool of workers ======================= The `Pool()` function returns an object representing a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, [10]) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" See `Process pools `_. Speed ===== The following benchmarks were performed on a single core Pentium 4, 2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see `benchmarks.py <../examples/benchmarks.py>`_. *Number of 256 byte string objects passed between processes/threads per sec*: ================================== ========== ================== Connection type Windows Linux ================================== ========== ================== Queue.Queue 49,000 17,000-50,000 [1]_ processing.Queue 22,000 21,000 Queue managed by server 6,900 6,500 processing.Pipe 52,000 57,000 ================================== ========== ================== .. [1] For some reason the performance of `Queue.Queue` is very variable on Linux. *Number of acquires/releases of a lock per sec*: ============================== ========== ========== Lock type Windows Linux ============================== ========== ========== threading.Lock 850,000 560,000 processing.Lock 420,000 510,000 Lock managed by server 10,000 8,400 threading.RLock 93,000 76,000 processing.RLock 420,000 500,000 RLock managed by server 8,800 7,400 ============================== ========== ========== *Number of interleaved waits/notifies per sec on a condition variable by two processes*: ============================== ========== ========== Condition type Windows Linux ============================== ========== ========== threading.Condition 27,000 31,000 processing.Condition 26,000 25,000 Condition managed by server 6,600 6,000 ============================== ========== ========== *Number of integers retrieved from a sequence per sec*: ============================== ========== ========== Sequence type Windows Linux ============================== ========== ========== list 6,400,000 5,100,000 unsynchornized shared array 3,900,000 3,100,000 synchronized shared array 200,000 220,000 list managed by server 20,000 17,000 ============================== ========== ========== .. _Prev: index.html .. _Up: index.html .. _Next: processing-ref.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/manager-objects.html000066400000000000000000000440461455552142400252570ustar00rootroot00000000000000 Manager objects

Manager objects

A manager object controls a server process which manages shared objects. Other processes can access the shared objects by using proxies.

Manager processes will be shut down as soon as they are garbage collected or their parent process exits. The manager classes are defined in the processing.managers module.

BaseManager

BaseManager is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects.

The public methods of BaseManager are the following:

__init__(self, address=None, authkey=None)

Creates a manager object.

Once created one should call start() or serveForever() to ensure that the manager object refers to a started manager process.

The arguments to the constructor are as follows:

address

The address on which the manager process listens for new connections. If address is None then an arbitrary one is chosen.

See Listener objects.

authkey

The authentication key which will be used to check the validity of incoming connections to the server process.

If authkey is None then currentProcess().getAuthKey() is used. Otherwise authkey is used and it must be a string.

See Authentication keys.

start()
Spawn or fork a subprocess to start the manager.
serveForever()
Start the manager in the current process. See Using a remote manager.
fromAddress(address, authkey)
A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See Using a remote manager.
shutdown()

Stop the process used by the manager. This is only available if start() has been used to start the server process.

This can be called multiple times.

BaseManager instances also have one read-only property:

address
The address used by the manager.

The creation of managers which support arbitrary types is discussed below in Customized managers.

SyncManager

SyncManager is a subclass of BaseManager which can be used for the synchronization of processes. Objects of this type are returned by processing.Manager().

It also supports creation of shared lists and dictionaries. The instance methods defined by SyncManager are

BoundedSemaphore(value=1)
Creates a shared threading.BoundedSemaphore object and returns a proxy for it.
Condition(lock=None)

Creates a shared threading.Condition object and returns a proxy for it.

If lock is supplied then it should be a proxy for a threading.Lock or threading.RLock object.

Event()
Creates a shared threading.Event object and returns a proxy for it.
Lock()
Creates a shared threading.Lock object and returns a proxy for it.
Namespace()

Creates a shared Namespace object and returns a proxy for it.

See Namespace objects.

Queue(maxsize=0)
Creates a shared Queue.Queue object and returns a proxy for it.
RLock()
Creates a shared threading.RLock object and returns a proxy for it.
Semaphore(value=1)
Creates a shared threading.Semaphore object and returns a proxy for it.
Array(typecode, sequence)
Creates an array and returns a proxy for it. (format is ignored.)
Value(typecode, value)
Creates an object with a writable value attribute and returns a proxy for it.
dict(), dict(mapping), dict(sequence)
Creates a shared dict object and returns a proxy for it.
list(), list(sequence)
Creates a shared list object and returns a proxy for it.
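
As an illustrative sketch only (it is not part of the original documentation), the proxies returned by the SyncManager methods above can be passed to child processes in the same way as the dict and list proxies used elsewhere; it assumes only the methods listed here:

from processing import Process, Manager

def f(queue, lock):
    lock.acquire()
    try:
        queue.put('hello from the child')   # the proxy behaves like Queue.Queue
    finally:
        lock.release()

if __name__ == '__main__':
    manager = Manager()          # a started SyncManager
    queue = manager.Queue()      # proxy for a shared Queue.Queue
    lock = manager.Lock()        # proxy for a shared threading.Lock
    p = Process(target=f, args=(queue, lock))
    p.start()
    p.join()
    print queue.get()            # prints "hello from the child"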

Namespace objects

A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes.

However, when using a proxy for a namespace object, an attribute beginning with '_' will be an attribute of the proxy and not an attribute of the referent:

>>> manager = processing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3    # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')

Customized managers

To create one's own manager one creates a subclass of BaseManager.

To create a method of the subclass which will create new shared objects one uses the following function:

CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)

Returns a function with signature func(self, *args, **kwds) which will create a shared object using the manager self and return a proxy for it.

The shared objects will be created by evaluating callable(*args, **kwds) in the manager process.

The arguments are:

callable
The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored.
proxytype

The type of proxy which will be used for objects returned by callable.

If proxytype is None then each time an object is returned by callable either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the exposed argument, see below.

exposed

Given a shared object returned by callable, the exposed argument is the list of those method names which should be exposed via BaseProxy._callMethod(). [1] [2]

If exposed is None and callable.__exposed__ exists then callable.__exposed__ is used instead.

If exposed is None and callable.__exposed__ does not exist then all methods of the shared object which do not start with '_' will be exposed.

An attempt to use BaseProxy._callMethod() with a method name which is not exposed will raise an exception.

typeid
If typeid is a string then it is used as an identifier for the callable. Otherwise, typeid must be None and a string prefixed by callable.__name__ is used as the identifier.
[1] A method here means any attribute which has a __call__ attribute.
[2]

The method names __repr__, __str__, and __cmp__ of a shared object are always exposed by the manager. However, instead of invoking the __repr__(), __str__(), __cmp__() instance methods (none of which are guaranteed to exist) they invoke the builtin functions repr(), str() and cmp().

Note that one should generally avoid exposing rich comparison methods like __eq__(), __ne__(), __le__(). To make the proxy type support comparison by value one can just expose __cmp__() instead (even if the referent does not have such a method).

Example

from processing.managers import BaseManager, CreatorMethod

class FooClass(object):
    def bar(self):
        print 'BAR'
    def baz(self):
        print 'BAZ'

class NewManager(BaseManager):
    Foo = CreatorMethod(FooClass)

if __name__ == '__main__':
    manager = NewManager()
    manager.start()
    foo = manager.Foo()
    foo.bar()               # prints 'BAR'
    foo.baz()               # prints 'BAZ'
    manager.shutdown()

See ex_newtype.py for more examples.

Using a remote manager

It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it).

Running the following commands creates a server for a shared queue which remote clients can use:

>>> from processing.managers import BaseManager, CreatorMethod
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy')
...
>>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none')
>>> m.serveForever()

One client can access the server as follows:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.put('hello')

Another client can also use it:

>>> from processing.managers import BaseManager, CreatorMethod
>>> class QueueManager(BaseManager):
...     get_proxy = CreatorMethod(typeid='get_proxy')
...
>>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none')
>>> queue = m.get_proxy()
>>> queue.get()
'hello'
uqfoundation-multiprocess-b3457a5/pypy3.9/doc/manager-objects.txt000066400000000000000000000235161455552142400251310ustar00rootroot00000000000000.. include:: header.txt ================= Manager objects ================= A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the `processing.managers` module. BaseManager =========== `BaseManager` is the base class for all manager classes which use a server process. It does not possess any methods which create shared objects. The public methods of `BaseManager` are the following: `__init__(self, address=None, authkey=None)` Creates a manager object. Once created one should call `start()` or `serveForever()` to ensure that the manager object refers to a started manager process. The arguments to the constructor are as follows: `address` The address on which the manager process listens for new connections. If `address` is `None` then an arbitrary one is chosen. See `Listener objects `_. `authkey` The authentication key which will be used to check the validity of incoming connections to the server process. If `authkey` is `None` then `currentProcess().getAuthKey()`. Otherwise `authkey` is used and it must be a string. See `Authentication keys `_. `start()` Spawn or fork a subprocess to start the manager. `serveForever()` Start the manager in the current process. See `Using a remote manager`_. `fromAddress(address, authkey)` A class method which returns a manager object referring to a pre-existing server process which is using the given address and authentication key. See `Using a remote manager`_. `shutdown()` Stop the process used by the manager. This is only available if `start()` has been used to start the server process. This can be called multiple times. `BaseManager` instances also have one read-only property: `address` The address used by the manager. The creation of managers which support arbitrary types is discussed below in `Customized managers`_. SyncManager =========== `SyncManager` is a subclass of `BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by `processing.Manager()`. It also supports creation of shared lists and dictionaries. The instance methods defined by `SyncManager` are `BoundedSemaphore(value=1)` Creates a shared `threading.BoundedSemaphore` object and returns a proxy for it. `Condition(lock=None)` Creates a shared `threading.Condition` object and returns a proxy for it. If `lock` is supplied then it should be a proxy for a `threading.Lock` or `threading.RLock` object. `Event()` Creates a shared `threading.Event` object and returns a proxy for it. `Lock()` Creates a shared `threading.Lock` object and returns a proxy for it. `Namespace()` Creates a shared `Namespace` object and returns a proxy for it. See `Namespace objects`_. `Queue(maxsize=0)` Creates a shared `Queue.Queue` object and returns a proxy for it. `RLock()` Creates a shared `threading.RLock` object and returns a proxy for it. `Semaphore(value=1)` Creates a shared `threading.Semaphore` object and returns a proxy for it. `Array(typecode, sequence)` Create an array and returns a proxy for it. (`format` is ignored.) `Value(typecode, value)` Create an object with a writable `value` attribute and returns a proxy for it. 
`dict()`, `dict(mapping)`, `dict(sequence)` Creates a shared `dict` object and returns a proxy for it. `list()`, `list(sequence)` Creates a shared `list` object and returns a proxy for it. Namespace objects ----------------- A namespace object has no public methods but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with `'_'` will be an attribute of the proxy and not an attribute of the referent:: >>> manager = processing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print Global Namespace(x=10, y='hello') Customized managers =================== To create one's own manager one creates a subclass of `BaseManager`. To create a method of the subclass which will create new shared objects one uses the following function: `CreatorMethod(callable=None, proxytype=None, exposed=None, typeid=None)` Returns a function with signature `func(self, *args, **kwds)` which will create a shared object using the manager `self` and return a proxy for it. The shared objects will be created by evaluating `callable(*args, **kwds)` in the manager process. The arguments are: `callable` The callable used to create a shared object. If the manager will connect to a remote manager then this is ignored. `proxytype` The type of proxy which will be used for object returned by `callable`. If `proxytype` is `None` then each time an object is returned by `callable` either a new proxy type is created or a cached one is reused. The methods of the shared object which will be exposed via the proxy will then be determined by the `exposed` argument, see below. `exposed` Given a shared object returned by `callable`, the `exposed` argument is the list of those method names which should be exposed via |callmethod|_. [#]_ [#]_ If `exposed` is `None` and `callable.__exposed__` exists then `callable.__exposed__` is used instead. If `exposed` is `None` and `callable.__exposed__` does not exist then all methods of the shared object which do not start with `'_'` will be exposed. An attempt to use |callmethod| with a method name which is not exposed will raise an exception. `typeid` If `typeid` is a string then it is used as an identifier for the callable. Otherwise, `typeid` must be `None` and a string prefixed by `callable.__name__` is used as the identifier. .. |callmethod| replace:: ``BaseProxy._callMethod()`` .. _callmethod: proxy-objects.html#methods-of-baseproxy .. [#] A method here means any attribute which has a `__call__` attribute. .. [#] The method names `__repr__`, `__str__`, and `__cmp__` of a shared object are always exposed by the manager. However, instead of invoking the `__repr__()`, `__str__()`, `__cmp__()` instance methods (none of which are guaranteed to exist) they invoke the builtin functions `repr()`, `str()` and `cmp()`. Note that one should generally avoid exposing rich comparison methods like `__eq__()`, `__ne__()`, `__le__()`. To make the proxy type support comparison by value one can just expose `__cmp__()` instead (even if the referent does not have such a method). 
Example ------- :: from processing.managers import BaseManager, CreatorMethod class FooClass(object): def bar(self): print 'BAR' def baz(self): print 'BAZ' class NewManager(BaseManager): Foo = CreatorMethod(FooClass) if __name__ == '__main__': manager = NewManager() manager.start() foo = manager.Foo() foo.bar() # prints 'BAR' foo.baz() # prints 'BAZ' manager.shutdown() See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples. Using a remote manager ====================== It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a shared queue which remote clients can use:: >>> from processing.managers import BaseManager, CreatorMethod >>> import Queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(callable=lambda:queue, typeid='get_proxy') ... >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='none') >>> m.serveForever() One client can access the server as follows:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.put('hello') Another client can also use it:: >>> from processing.managers import BaseManager, CreatorMethod >>> class QueueManager(BaseManager): ... get_proxy = CreatorMethod(typeid='get_proxy') ... >>> m = QueueManager.fromAddress(address=('foo.bar.org', 50000), authkey='none') >>> queue = m.get_proxy() >>> queue.get() 'hello' .. _Prev: connection-objects.html .. _Up: processing-ref.html .. _Next: proxy-objects.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/pool-objects.html000066400000000000000000000265511455552142400246170ustar00rootroot00000000000000 Process Pools
Prev         Up         Next

Process Pools

The processing.pool module has one public class:

class Pool(processes=None, initializer=None, initargs=())

A class representing a pool of worker processes.

Tasks can be offloaded to the pool and the results dealt with when they become available.

Note that tasks can only be submitted (or retrieved) by the process which created the pool object.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

Pool objects

Pool has the following public methods:

__init__(processes=None)
The constructor creates and starts processes worker processes. If processes is None then cpuCount() is used to find a default, or 1 if cpuCount() raises NotImplementedError.
apply(func, args=(), kwds={})
Equivalent of the apply() builtin function. It blocks till the result is ready.
applyAsync(func, args=(), kwds={}, callback=None)

A variant of the apply() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

map(func, iterable, chunksize=None)

A parallel equivalent of the map() builtin function. It blocks till the result is ready.

This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.

mapAsync(func, iterable, chunksize=None, callback=None)

A variant of the map() method which returns a result object --- see Asynchronous result objects.

If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready callback is applied to it (unless the call failed). callback should complete immediately since otherwise the thread which handles the results will get blocked.

imap(func, iterable, chunksize=1)

An equivalent of itertools.imap().

The chunksize argument is the same as the one used by the map() method. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1.

Also if chunksize is 1 then the next() method of the iterator returned by the imap() method has an optional timeout parameter: next(timeout) will raise processing.TimeoutError if the result cannot be returned within timeout seconds.

imapUnordered(func, iterable, chunksize=1)
The same as imap() except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".)
close()
Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit.
terminate()
Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected terminate() will be called immediately.
join()
Wait for the worker processes to exit. One must call close() or terminate() before using join().

Asynchronous result objects

The result objects returned by applyAsync() and mapAsync() have the following public methods:

get(timeout=None)
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then processing.TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
wait(timeout=None)
Waits until the result is available or until timeout seconds pass.
ready()
Returns whether the call has completed.
successful()
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.

Examples

The following example demonstrates the use of a pool:

from processing import Pool

def f(x):
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=4)              # start 4 worker processes

    result = pool.applyAsync(f, (10,))    # evaluate "f(10)" asynchronously
    print result.get(timeout=1)           # prints "100" unless your computer is *very* slow

    print pool.map(f, range(10))          # prints "[0, 1, 4,..., 81]"

    it = pool.imap(f, range(10))
    print it.next()                       # prints "0"
    print it.next()                       # prints "1"
    print it.next(timeout=1)              # prints "4" unless your computer is *very* slow

    import time
    result = pool.applyAsync(time.sleep, (10,))
    print result.get(timeout=1)           # raises `TimeoutError`
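
The wait(), ready() and successful() methods of result objects are not exercised above. The following additional sketch is illustrative only (it is not part of the original documentation) and assumes the applyAsync() behaviour described earlier:

from processing import Pool
import time

def slow_square(x):
    time.sleep(0.5)
    return x*x

if __name__ == '__main__':
    pool = Pool(processes=2)
    result = pool.applyAsync(slow_square, (7,))
    print result.ready()             # most likely prints "False" straight away
    result.wait(timeout=5)           # block for at most 5 seconds
    if result.ready() and result.successful():
        print result.get()           # prints "49"
    pool.close()
    pool.join()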

See also ex_pool.py.

uqfoundation-multiprocess-b3457a5/pypy3.9/doc/pool-objects.txt000066400000000000000000000136411455552142400244660ustar00rootroot00000000000000.. include:: header.txt =============== Process Pools =============== The `processing.pool` module has one public class: **class** `Pool(processes=None, initializer=None, initargs=())` A class representing a pool of worker processes. Tasks can be offloaded to the pool and the results dealt with when they become available. Note that tasks can only be submitted (or retrieved) by the process which created the pool object. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. Pool objects ============ `Pool` has the following public methods: `__init__(processes=None)` The constructor creates and starts `processes` worker processes. If `processes` is `None` then `cpuCount()` is used to find a default or 1 if `cpuCount()` raises `NotImplemented`. `apply(func, args=(), kwds={})` Equivalent of the `apply()` builtin function. It blocks till the result is ready. `applyAsync(func, args=(), kwds={}, callback=None)` A variant of the `apply()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `map(func, iterable, chunksize=None)` A parallel equivalent of the `map()` builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting `chunksize` to a positive integer. `mapAsync(func, iterable, chunksize=None, callback=None)` A variant of the `map()` method which returns a result object --- see `Asynchronous result objects`_. If `callback` is specified then it should be a callable which accepts a single argument. When the result becomes ready `callback` is applied to it (unless the call failed). `callback` should complete immediately since otherwise the thread which handles the results will get blocked. `imap(func, iterable, chunksize=1)` An equivalent of `itertools.imap()`. The `chunksize` argument is the same as the one used by the `map()` method. For very long iterables using a large value for `chunksize` can make make the job complete **much** faster than using the default value of `1`. Also if `chunksize` is `1` then the `next()` method of the iterator returned by the `imap()` method has an optional `timeout` parameter: `next(timeout)` will raise `processing.TimeoutError` if the result cannot be returned within `timeout` seconds. `imapUnordered(func, iterable, chunksize=1)` The same as `imap()` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) `close()` Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. `terminate()` Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected `terminate()` will be called immediately. 
`join()` Wait for the worker processes to exit. One must call `close()` or `terminate()` before using `join()`. Asynchronous result objects =========================== The result objects returns by `applyAsync()` and `mapAsync()` have the following public methods: `get(timeout=None)` Returns the result when it arrives. If `timeout` is not `None` and the result does not arrive within `timeout` seconds then `processing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by `get()`. `wait(timeout=None)` Waits until the result is available or until `timeout` seconds pass. `ready()` Returns whether the call has completed. `successful()` Returns whether the call completed without raising an exception. Will raise `AssertionError` if the result is not ready. Examples ======== The following example demonstrates the use of a pool:: from processing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.applyAsync(f, (10,)) # evaluate "f(10)" asynchronously print result.get(timeout=1) # prints "100" unless your computer is *very* slow print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print it.next() # prints "0" print it.next() # prints "1" print it.next(timeout=1) # prints "4" unless your computer is *very* slow import time result = pool.applyAsync(time.sleep, (10,)) print result.get(timeout=1) # raises `TimeoutError` See also `ex_pool.py <../examples/ex_pool.py>`_. .. _Prev: proxy-objects.html .. _Up: processing-ref.html .. _Next: sharedctypes.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/process-objects.html000066400000000000000000000235741455552142400253260ustar00rootroot00000000000000 Process objects
Prev         Up         Next

Process objects

Process objects represent activity that is run in a separate process.

Process

The Process class has equivalents of all the methods of threading.Thread:

__init__(group=None, target=None, name=None, args=(), kwargs={})

This constructor should always be called with keyword arguments. Arguments are:

group
should be None; exists for compatibility with threading.Thread.
target
is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called.
name
is the process name. By default, a unique name is constructed of the form 'Process-N1:N2:...:Nk' where N1,N2,...,Nk is a sequence of integers whose length is determined by the generation of the process.
args
is the argument tuple for the target invocation. Defaults to ().
kwargs
is a dictionary of keyword arguments for the target invocation. Defaults to {}.

If a subclass overrides the constructor, it must make sure it invokes the base class constructor (Process.__init__()) before doing anything else to the process.

run()

Method representing the process's activity.

You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively.

start()

Start the process's activity.

This must be called at most once per process object. It arranges for the object's run() method to be invoked in a separate process.

join(timeout=None)

This blocks the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.

If timeout is None then there is no timeout.

A process can be joined many times.

A process cannot join itself because this would cause a deadlock.

It is an error to attempt to join a process before it has been started.

getName()
Return the process's name.
setName(name)

Set the process's name.

The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor.

isAlive()

Return whether the process is alive.

Roughly, a process object is alive from the moment the start() method returns until the child process terminates.

isDaemon()
Return the process's daemon flag.
setDaemon(daemonic)

Set the process's daemon flag to the Boolean value daemonic. This must be called before start() is called.

The initial value is inherited from the creating process.

When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes.

In addition process objects also support the following methods.

getPid()
Return the process ID. Before the process is spawned this will be None.
getExitCode()
Return the child's exit code. This will be None if the process has not yet terminated. A negative value -N indicates that the child was terminated by signal N.
getAuthKey()

Return the process's authentication key (a string).

When the processing package is initialized the main process is assigned a random hexadecimal string.

When a Process object is created it will inherit the authentication key of its parent process, although this may be changed using setAuthKey() below.

See Authentication Keys.

setAuthKey(authkey)
Set the process's authentication key which must be a string.
terminate()

Terminate the process. On Unix this is done using the SIGTERM signal and on Windows TerminateProcess() is used. Note that exit handlers and finally clauses etc. will not be executed. Also note that descendants of the process will not be terminated.

Warning

If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock.

Note that the start(), join(), isAlive() and getExitCode() methods should only be called by the process that created the process object.

Example

Example usage of some of the methods of Process:

>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
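
In addition to passing a target, the run() method described above can be overridden by subclassing Process. The following sketch is illustrative only (it is not part of the original documentation) and uses only methods documented on this page:

from processing import Process

class Worker(Process):
    def __init__(self, n):
        Process.__init__(self)       # always invoke the base constructor first
        self.n = n
    def run(self):
        print 'square of %d is %d' % (self.n, self.n * self.n)

if __name__ == '__main__':
    w = Worker(5)
    w.setDaemon(False)               # the flag is inherited, so this is usually redundant
    w.start()                        # arranges for w.run() to be invoked in a separate process
    w.join()
    print w.getExitCode()            # prints "0" if run() returned normally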
uqfoundation-multiprocess-b3457a5/pypy3.9/doc/process-objects.txt000066400000000000000000000136131455552142400251720ustar00rootroot00000000000000.. include:: header.txt ================= Process objects ================= Process objects represent activity that is run in a separate process. Process ======= The `Process` class has equivalents of all the methods of `threading.Thread`: `__init__(group=None, target=None, name=None, args=(), kwargs={})` This constructor should always be called with keyword arguments. Arguments are: `group` should be `None`; exists for compatibility with `threading.Thread`. `target` is the callable object to be invoked by the `run()` method. Defaults to None, meaning nothing is called. `name` is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. `args` is the argument tuple for the target invocation. Defaults to `()`. `kwargs` is a dictionary of keyword arguments for the target invocation. Defaults to `{}`. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (`Process.__init__()`) before doing anything else to the process. `run()` Method representing the process's activity. You may override this method in a subclass. The standard `run()` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the `args` and `kwargs` arguments, respectively. `start()` Start the process's activity. This must be called at most once per process object. It arranges for the object's `run()` method to be invoked in a separate process. `join(timeout=None)` This blocks the calling thread until the process whose `join()` method is called terminates or until the optional timeout occurs. If `timeout` is `None` then there is no timeout. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. `getName()` Return the process's name. `setName(name)` Set the process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. `isAlive()` Return whether the process is alive. Roughly, a process object is alive from the moment the `start()` method returns until the child process terminates. `isDaemon()` Return the process's daemon flag. `setDaemon(daemonic)` Set the process's daemon flag to the Boolean value `daemonic`. This must be called before `start()` is called. The initial value is inherited from the creating process. When a parent process finishes it attempts to stop all of its daemonic child processes and then tries to join each of its non-daemonic child processes. In addition process objects also support the following methods. `getPid()` Return the process ID. Before the process is spawned this will be `None`. `getExitCode()` Return the child's exit code. This will be `None` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. `getAuthKey()` Return the process's authentication key (a string). When the `processing` package is initialized the main process is assigned a random hexadecimal string. 
When a `Process` object is created it will inherit the authentication key of its parent process, although this may be changed using `setAuthKey()` below. See `Authentication Keys `_. `setAuthKey(authkey)` Set the process's authentication key which must be a string. `terminate()` Terminate the process. On Unix this is done using the `SIGTERM` signal and on Windows `TerminateProcess()` is used. Note that exit handlers and finally clauses etc will not be executed. Also note that descendants of the process will *not* be terminates. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the `start()`, `join()`, `isAlive()` and `getExitCode()` methods should only be called by the process that created the process object. Example ======= Example usage of some of the methods of `Process`:: >>> import processing, time, signal >>> p = processing.Process(target=time.sleep, args=(1000,)) >>> print p, p.isAlive() False >>> p.start() >>> print p, p.isAlive() True >>> p.terminate() >>> print p, p.isAlive() False >>> p.getExitCode() == -signal.SIGTERM True .. _Prev: processing-ref.html .. _Up: processing-ref.html .. _Next: queue-objects.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/processing-ref.html000066400000000000000000000573611455552142400251500ustar00rootroot00000000000000 processing package reference
Prev         Up         Next

processing package reference

The processing package mostly replicates the API of the threading module.

Classes and exceptions

class Process(group=None, target=None, name=None, args=(), kwargs={})

An analogue of threading.Thread.

See Process objects.

exception BufferTooShort

Exception raised by the recvBytesInto() method of a connection object when the supplied buffer object is too small for the message read.

If e is an instance of BufferTooShort then e.args[0] will give the message as a byte string.

Pipes and Queues

When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks.

For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers).

Note that one can also create a shared queue by using a manager object -- see Managers.

For an example of the usage of queues for interprocess communication see ex_workers.py.

Pipe(duplex=True)

Returns a pair (conn1, conn2) of connection objects representing the ends of a pipe.

If duplex is true then the pipe is two way; otherwise conn1 can only be used for receiving messages and conn2 can only be used for sending messages.

See Connection objects.

Queue(maxsize=0)

Returns a process shared queue object. The usual Empty and Full exceptions from the standard library's Queue module are raised to signal timeouts.

See Queue objects.
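
As a brief illustration (this example is not part of the original documentation), a pipe can be used between two processes as follows; it assumes that connection objects provide send() and recv() as described in the Connection objects reference:

from processing import Process, Pipe

def child(conn):
    conn.send(['hello', 42])         # any picklable object can be sent
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe() # duplex by default
    p = Process(target=child, args=(child_conn,))
    p.start()
    print parent_conn.recv()         # prints "['hello', 42]"
    p.join()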

Synchronization primitives

Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the standard library's threading module.

Note that one can also create synchronization primitives by using a manager object -- see Managers.

BoundedSemaphore(value=1)

Returns a bounded semaphore object: a clone of threading.BoundedSemaphore.

(On Mac OS X this is indistinguishable from Semaphore() because sem_getvalue() is not implemented on that platform).

Condition(lock=None)

Returns a condition variable: a clone of threading.Condition.

If lock is specified then it should be a Lock or RLock object from processing.

Event()
Returns an event object: a clone of threading.Event.
Lock()
Returns a non-recursive lock object: a clone of threading.Lock.
RLock()
Returns a recursive lock object: a clone of threading.RLock.
Semaphore(value=1)
Returns a bounded semaphore object: a clone of threading.Semaphore.

Acquiring with a timeout

The acquire() method of BoundedSemaphore, Lock, RLock and Semaphore has a timeout parameter not supported by the equivalents in threading. The signature is acquire(block=True, timeout=None) with keyword parameters being acceptable. If block is true and timeout is not None then it specifies a timeout in seconds. If block is false then timeout is ignored.
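
A short illustrative sketch of the timeout form (not part of the original documentation); it assumes that, as with threading.Lock, acquire() returns True or False:

from processing import Lock

lock = Lock()
lock.acquire()                               # the lock is now held

# a non-recursive lock cannot be acquired again by the same holder,
# so this call times out and returns False
if not lock.acquire(block=True, timeout=0.1):
    print 'could not acquire the lock within 0.1 seconds'
lock.release()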

Interrupting the main thread

If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to BoundedSemaphore.acquire(), Lock.acquire(), RLock.acquire(), Semaphore.acquire(), Condition.acquire() or Condition.wait() then the call will be immediately interrupted and KeyboardInterrupt will be raised.

This differs from the behaviour of threading where SIGINT will be ignored while the equivalent blocking calls are in progress.

Shared Objects

It is possible to create shared objects using shared memory which can be inherited by child processes.

Value(typecode_or_type, *args, **, lock=True)

Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Array(typecode_or_type, size_or_initializer, **, lock=True)

Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Note that an array of ctypes.c_char has value and rawvalue attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library.

See also sharedctypes.
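
For illustration (this example is not part of the original documentation), a minimal sketch using Value() and Array() as described above; it assumes the synchronized wrappers support indexing and len():

from processing import Process, Value, Array

def f(counter, arr):
    counter.value += 1               # increment the shared value
    for i in range(len(arr)):
        arr[i] = -arr[i]             # negate each element in place

if __name__ == '__main__':
    counter = Value('i', 0)          # 'i' is the array-module typecode for a C int
    arr = Array('d', [1.0, 2.0, 3.0])
    p = Process(target=f, args=(counter, arr))
    p.start()
    p.join()
    print counter.value              # prints "1"
    print [arr[i] for i in range(len(arr))]   # prints "[-1.0, -2.0, -3.0]"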

Managers

Managers provide a way to create data which can be shared between different processes.

Manager()

Returns a started SyncManager object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies.

The methods for creating shared objects are

list(), dict(), Namespace(), Value(), Array(), Lock(), RLock(), Semaphore(), BoundedSemaphore(), Condition(), Event(), Queue().

See SyncManager.

It is possible to create managers which support other types -- see Customized managers.

Process Pools

One can create a pool of processes which will carry out tasks submitted to it.

Pool(processes=None, initializer=None, initargs=())

Returns a process pool object which controls a pool of worker processes to which jobs can be submitted.

It supports asynchronous results with timeouts and callbacks and has a parallel map implementation.

processes is the number of worker processes to use. If processes is None then the number returned by cpuCount() is used. If initializer is not None then each worker process will call initializer(*initargs) when it starts.

See Pool objects.

Logging

Some support for logging is available. Note, however, that the logging package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up.

enableLogging(level, HandlerType=None, handlerArgs=(), format=None)

Enables logging and sets the debug level used by the package's logger to level. See documentation for the logging module in the standard library.

If HandlerType is specified then a handler is created using HandlerType(*handlerArgs) and this will be used by the logger -- any previous handlers will be discarded. If format is specified then this will be used for the handler; otherwise format defaults to '[%(levelname)s/%(processName)s] %(message)s'. (The logger used by processing allows use of the non-standard '%(processName)s' format.)

If HandlerType is not specified and the logger has no handlers then a default one is created which prints to sys.stderr.

Note: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call enableLogging() with the same arguments which were used when its parent process last called enableLogging() (if it ever did).

getLogger()
Returns the logger used by processing. If enableLogging() has not yet been called then None is returned.

Below is an example session with logging turned on:

>>> import processing, logging
>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] manager exiting with exitcode 0

Miscellaneous

activeChildren()

Return list of all live children of the current process.

Calling this has the side effect of "joining" any processes which have already finished.

cpuCount()
Returns the number of CPUs in the system. May raise NotImplementedError.
currentProcess()

An analogue of threading.current_thread().

Returns the object corresponding to the current process.

freezeSupport()

Adds support for when a program which uses the processing package has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)

One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example

from processing import Process, freezeSupport

def f():
    print 'hello world!'

if __name__ == '__main__':
    freezeSupport()
    Process(target=f).start()

If the freezeSupport() line is missed out then trying to run the frozen executable will raise RuntimeError.

If the module is being run normally by the python interpreter then freezeSupport() has no effect.
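
An illustrative sketch (not part of the original documentation) combining some of the helpers above; it assumes they are importable directly from the processing package, as this reference presents them:

from processing import Process, currentProcess, activeChildren, cpuCount
import time

def f():
    print 'child is', currentProcess().getName()

if __name__ == '__main__':
    try:
        print 'number of CPUs:', cpuCount()
    except NotImplementedError:
        print 'number of CPUs is unknown'
    for i in range(2):
        Process(target=f).start()
    print 'live children:', len(activeChildren())
    time.sleep(1)                    # give the children time to finish
    print 'live children:', len(activeChildren())   # finished children are joined here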

Note

  • The processing.dummy package replicates the API of processing but is no more than a wrapper around the threading module.
  • processing contains no analogues of activeCount, enumerate, settrace, setprofile, Timer, or local from the threading module.
uqfoundation-multiprocess-b3457a5/pypy3.9/doc/processing-ref.txt000066400000000000000000000310141455552142400250060ustar00rootroot00000000000000.. include:: header.txt ============================== processing package reference ============================== The `processing` package mostly replicates the API of the `threading` module. Classes and exceptions ---------------------- **class** `Process(group=None, target=None, name=None, args=(), kwargs={})` An analogue of `threading.Thread`. See `Process objects`_. **exception** `BufferTooShort` Exception raised by the `recvBytesInto()` method of a `connection object `_ when the supplied buffer object is too small for the message read. If `e` is an instance of `BufferTooShort` then `e.args[0]` will give the message as a byte string. Pipes and Queues ---------------- When using multiple processes one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use a pipe (for a connection between two processes) or a queue (which allows multiple producers and consumers). Note that one can also create a shared queue by using a manager object -- see `Managers`_. For an example of the usage of queues for interprocess communication see `ex_workers.py <../examples/ex_workers.py>`_. `Pipe(duplex=True)` Returns a pair `(conn1, conn2)` of connection objects representing the ends of a pipe. If `duplex` is true then the pipe is two way; otherwise `conn1` can only be used for receiving messages and `conn2` can only be used for sending messages. See `Connection objects `_. `Queue(maxsize=0)` Returns a process shared queue object. The usual `Empty` and `Full` exceptions from the standard library's `Queue` module are raised to signal timeouts. See `Queue objects `_. Synchronization primitives -------------------------- Generally synchronization primitives are not as necessary in a multiprocess program as they are in a mulithreaded program. See the documentation for the standard library's `threading` module. Note that one can also create synchronization primitves by using a manager object -- see `Managers`_. `BoundedSemaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.BoundedSemaphore`. (On Mac OSX this is indistiguishable from `Semaphore()` because `sem_getvalue()` is not implemented on that platform). `Condition(lock=None)` Returns a condition variable: a clone of `threading.Condition`. If `lock` is specified then it should be a `Lock` or `RLock` object from `processing`. `Event()` Returns an event object: a clone of `threading.Event`. `Lock()` Returns a non-recursive lock object: a clone of `threading.Lock`. `RLock()` Returns a recursive lock object: a clone of `threading.RLock`. `Semaphore(value=1)` Returns a bounded semaphore object: a clone of `threading.Semaphore`. .. admonition:: Acquiring with a timeout The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and `Semaphore` has a timeout parameter not supported by the equivalents in `threading`. The signature is `acquire(block=True, timeout=None)` with keyword parameters being acceptable. If `block` is true and `timeout` is not `None` then it specifies a timeout in seconds. If `block` is false then `timeout` is ignored. .. 
admonition:: Interrupting the main thread If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to `BoundedSemaphore.acquire()`, `Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()`, `Condition.acquire()` or `Condition.wait()` then the call will be immediately interrupted and `KeyboardInterrupt` will be raised. This differs from the behaviour of `threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared Objects -------------- It is possible to create shared objects using shared memory which can be inherited by child processes. `Value(typecode_or_type, *args, **, lock=True)` Returns a ctypes object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Array(typecode_or_type, size_or_initializer, **, lock=True)` Returns a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see the documentation for ctypes in the standard library. See also `sharedctypes `_. Managers -------- Managers provide a way to create data which can be shared between different processes. `Manager()` Returns a started `SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. The methods for creating shared objects are `list()`, `dict()`, `Namespace()`, `Value()`, `Array()`, `Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`. See `SyncManager `_. It is possible to create managers which support other types -- see `Customized managers `_. Process Pools ------------- One can create a pool of processes which will carry out tasks submitted to it. 
`Pool(processes=None, initializer=None, initargs=())` Returns a process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. `processes` is the number of worker processes to use. If `processes` is `None` then the number returned by `cpuCount()` is used. If `initializer` is not `None` then each worker process will call `initializer(*initargs)` when it starts. See `Pool objects `_. Logging ------- Some support for logging is available. Note, however, that the `logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. `enableLogging(level, HandlerType=None, handlerArgs=(), format=None)` Enables logging and sets the debug level used by the package's logger to `level`. See documentation for the `logging` module in the standard library. If `HandlerType` is specified then a handler is created using `HandlerType(*handlerArgs)` and this will be used by the logger -- any previous handlers will be discarded. If `format` is specified then this will be used for the handler; otherwise `format` defaults to `'[%(levelname)s/%(processName)s] %(message)s'`. (The logger used by `processing` allows use of the non-standard `'%(processName)s'` format.) If `HandlerType` is not specified and the logger has no handlers then a default one is created which prints to `sys.stderr`. *Note*: on Windows a child process does not directly inherit its parent's logger; instead it will automatically call `enableLogging()` with the same arguments which were used when its parent process last called `enableLogging()` (if it ever did). `getLogger()` Returns the logger used by `processing`. If `enableLogging()` has not yet been called then `None` is returned. Below is an example session with logging turned on:: >>> import processing, logging >>> processing.enableLogging(level=logging.INFO) >>> processing.getLogger().warning('doomed') [WARNING/MainProcess] doomed >>> m = processing.Manager() [INFO/SyncManager-1] child process calling self.run() [INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-1] manager received shutdown message [INFO/SyncManager-1] manager exiting with exitcode 0 Miscellaneous ------------- `activeChildren()` Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. `cpuCount()` Returns the number of CPUs in the system. May raise `NotImplementedError`. `currentProcess()` An analogue of `threading.current_thread()`. Returns the object corresponding to the current process. `freezeSupport()` Adds support for when a program which uses the `processing` package has been frozen to produce a Windows executable. (Has been tested with `py2exe`, `PyInstaller` and `cx_Freeze`.) One needs to call this function straight after the `if __name__ == '__main__'` line of the main module. For example :: from processing import Process, freezeSupport def f(): print 'hello world!' if __name__ == '__main__': freezeSupport() Process(target=f).start() If the `freezeSupport()` line is missed out then trying to run the frozen executable will raise `RuntimeError`. If the module is being run normally by the python interpreter then `freezeSupport()` has no effect. .. 
note:: * The `processing.dummy` package replicates the API of `processing` but is no more than a wrapper around the `threading` module. * `processing` contains no analogues of `activeCount`, `enumerate`, `settrace`, `setprofile`, `Timer`, or `local` from the `threading` module. Subsections ----------- + `Process objects `_ + `Queue objects `_ + `Connection objects `_ + `Manager objects `_ + `Proxy objects `_ + `Pool objects `_ + `Shared ctypes object `_ + `Listeners and Clients `_ .. _Prev: intro.html .. _Up: index.html .. _Next: process-objects.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/programming-guidelines.html000066400000000000000000000214551455552142400266650ustar00rootroot00000000000000 Programming guidelines
Prev         Up         Next

Programming guidelines

There are certain guidelines and idioms which should be adhered to when using the processing package.

All platforms

Avoid shared state

As far as possible one should try to avoid shifting large amounts of data between processes.

It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the threading module.

Picklability:
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies:

Do not use a proxy object from more than one thread unless you protect it with a lock.

(There is never a problem with different processes using the 'same' proxy.)

Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or activeChildren() is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's isAlive() will join the process. Even so it is probably good practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many of the types from the processing package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes

Using the terminate() method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes.

Therefore it is probably best to only consider using terminate() on processes which never use any shared resources.

Joining processes that use queues

Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the cancelJoin() method of the queue to avoid this behaviour.)

This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will automatically be joined.

An example which will deadlock is the following:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    p.join()                    # this deadlocks
    obj = queue.get()

A fix here would be to swap the last two lines round (or simply remove the p.join() line).
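
As a minimal sketch of the corrected ordering (reusing the same f() and queue as above), the item is drained before the join, so the feeder thread can flush and the child can exit:

from processing import Process, Queue

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    queue = Queue()
    p = Process(target=f, args=(queue,))
    p.start()
    obj = queue.get()           # drain the queue first ...
    p.join()                    # ... then the join cannot deadlock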

Explicitly pass resources to child processes

On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process.

Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process.

So for instance

from processing import Process, Lock

def f():
    ... do something using "lock" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f).start()

should be rewritten as

from processing import Process, Lock

def f(l):
    ... do something using "l" ...

if __name__ == '__main__':
   lock = Lock()
   for i in range(10):
        Process(target=f, args=(lock,)).start()

Windows

Since Windows lacks os.fork() it has a few extra restrictions:

More picklability:

Ensure that all arguments to Process.__init__() are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the target argument on Windows --- just define a function and use that instead.

Also, if you subclass Process then make sure that instances will be picklable when the start() method is called.

Global variables:

Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that start() was called.

However, global variables which are just module level constants cause no problems.

Safe importing of main module:

Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such as starting a new process).

For example, under Windows running the following module would fail with a RuntimeError:

from processing import Process

def foo():
    print 'hello'

p = Process(target=foo)
p.start()

Instead one should protect the "entry point" of the program by using if __name__ == '__main__': as follows:

from processing import Process, freezeSupport

def foo():
    print 'hello'

if __name__ == '__main__':
    freezeSupport()
    p = Process(target=foo)
    p.start()

(The freezeSupport() line can be omitted if the program will be run normally instead of frozen.)

This allows the newly spawned Python interpreter to safely import the module and then run the module's foo() function.

Similar restrictions apply if a pool or manager is created in the main module.
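
For example, a pool created in the main module should also sit behind the guard; a minimal sketch (the square() helper is hypothetical) might be:

from processing import Pool, freezeSupport

def square(x):
    return x * x

if __name__ == '__main__':
    freezeSupport()
    pool = Pool(processes=2)
    print(pool.map(square, range(10)))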

uqfoundation-multiprocess-b3457a5/pypy3.9/doc/programming-guidelines.txt000066400000000000000000000150221455552142400265310ustar00rootroot00000000000000.. include:: header.txt ======================== Programming guidelines ======================== There are certain guidelines and idioms which should be adhered to when using the `processing` package. All platforms ------------- *Avoid shared state* As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the `threading` module. *Picklability*: Ensure that the arguments to the methods of proxies are picklable. *Thread safety of proxies*: Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the 'same' proxy.) *Joining zombie processes* On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or `activeChildren()` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's `isAlive()` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. *Better to inherit than pickle/unpickle* On Windows many of types from the `processing` package need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which need access to a shared resource created elsewhere can inherit it from an ancestor process. *Avoid terminating processes* Using the `terminate()` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using `terminate()` on processes which never use any shared resources. *Joining processes that use queues* Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the `cancelJoin()` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from processing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the `p.join()` line). *Explicity pass resources to child processes* On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. 
Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from processing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from processing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Windows ------- Since Windows lacks `os.fork()` it has a few extra restrictions: *More picklability*: Ensure that all arguments to `Process.__init__()` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the `target` argument on Windows --- just define a function and use that instead. Also, if you subclass `Process` then make sure that instances will be picklable when the `start()` method is called. *Global variables*: Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that `start()` was called. However, global variables which are just module level constants cause no problems. *Safe importing of main module*: Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a `RuntimeError`:: from processing import Process def foo(): print 'hello' p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using `if __name__ == '__main__':` as follows:: from processing import Process def foo(): print 'hello' if __name__ == '__main__': freezeSupport() p = Process(target=foo) p.start() (The `freezeSupport()` line can be ommitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's `foo()` function. Similar restrictions apply if a pool or manager is created in the main module. .. _Prev: connection-ref.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/proxy-objects.html000066400000000000000000000175771455552142400250370ustar00rootroot00000000000000 Proxy objects
Prev         Up         Next

Proxy objects

A proxy is an object which refers to a shared object which lives (presumably) in a different process. The shared object is said to be the referent of the proxy. Multiple proxy objects may have the same referent.

A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can:

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<Proxy[list] object at 0x00DFA230>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
>>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
True

Notice that applying str() to a proxy will return the representation of the referent, whereas applying repr() will return the representation of the proxy.

An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:

>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b)         # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']

Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the for statement:

>>> a = manager.dict([(i*i, i) for i in range(10)])
>>> for key in a:
...     print '<%r,%r>' % (key, a[key]),
...
<0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6>

Note

Although list and dict proxy objects are iterable, it will be much more efficient to iterate over a copy of the referent, for example

for item in some_list[:]:
    ...

and

for key in some_dict.keys():
    ...
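
A short sketch of the copy-then-iterate pattern with a manager dictionary (contents as in the session above; keys() is assumed to return a plain copy of the keys):

from processing import Manager

if __name__ == '__main__':
    manager = Manager()
    d = manager.dict([(i*i, i) for i in range(10)])
    for key in d.keys():        # iterate over a copy rather than the proxy itself
        print((key, d[key]))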

Methods of BaseProxy

Proxy objects are instances of subclasses of BaseProxy. The only semi-public methods of BaseProxy are the following:

_callMethod(methodname, args=(), kwds={})

Call and return the result of a method of the proxy's referent.

If proxy is a proxy whose referent is obj then the expression

proxy._callMethod(methodname, args, kwds)

will evaluate the expression

getattr(obj, methodname)(*args, **kwds)         (*)

in the manager's process.

The returned value will be either a copy of the result of (*) or if the result is an unpicklable iterator then a proxy for the iterator.

If an exception is raised by (*) then it is re-raised by _callMethod(). If some other exception is raised in the manager's process then this is converted into a RemoteError exception and is raised by _callMethod().

Note in particular that an exception will be raised if methodname has not been exposed --- see the exposed argument to CreatorMethod.

_getValue()

Return a copy of the referent.

If the referent is unpicklable then this will raise an exception.

__repr__
Return a representation of the proxy object.
__str__
Return the representation of the referent.

Cleanup

A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent.

A shared object gets deleted from the manager process when there are no longer any proxies referring to it.

Examples

An example of the usage of _callMethod():

>>> l = manager.list(range(10))
>>> l._callMethod('__getslice__', (2, 7))   # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._callMethod('__iter__')               # equiv to `iter(l)`
<Proxy[iter] object at 0x00DFAFF0>
>>> l._callMethod('__getitem__', (20,))     # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range

As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:

class IteratorProxy(BaseProxy):
    def __iter__(self):
        return self
    def next(self):
        return self._callMethod('next')
uqfoundation-multiprocess-b3457a5/pypy3.9/doc/proxy-objects.txt000066400000000000000000000115571455552142400247020ustar00rootroot00000000000000.. include:: header.txt =============== Proxy objects =============== A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that the its referent can:: >>> from processing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print l [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print repr(l) >>> l[4] 16 >>> l[2:5] [4, 9, 16] >>> l == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] True Notice that applying `str()` to a proxy will return the representation of the referent, whereas applying `repr()` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of `a` now contains referent of `b` >>> print a, b [[]] [] >>> b.append('hello') >>> print a, b [['hello']] ['hello'] Some proxy methods return a proxy for an iterator. In particular list and dictionary proxies are iterables so they can be used with the `for` statement:: >>> a = manager.dict([(i*i, i) for i in range(10)]) >>> for key in a: ... print '<%r,%r>' % (key, a[key]), ... <0,0> <1,1> <4,2> <81,9> <64,8> <9,3> <16,4> <49,7> <25,5> <36,6> .. note:: Although `list` and `dict` proxy objects are iterable, it will be much more efficient to iterate over a *copy* of the referent, for example :: for item in some_list[:]: ... and :: for key in some_dict.keys(): ... Methods of `BaseProxy` ====================== Proxy objects are instances of subclasses of `BaseProxy`. The only semi-public methods of `BaseProxy` are the following: `_callMethod(methodname, args=(), kwds={})` Call and return the result of a method of the proxy's referent. If `proxy` is a proxy whose referent is `obj` then the expression `proxy._callMethod(methodname, args, kwds)` will evaluate the expression `getattr(obj, methodname)(*args, **kwds)` |spaces| _`(*)` in the manager's process. The returned value will be either a copy of the result of `(*)`_ or if the result is an unpicklable iterator then a proxy for the iterator. If an exception is raised by `(*)`_ then then is re-raised by `_callMethod()`. If some other exception is raised in the manager's process then this is converted into a `RemoteError` exception and is raised by `_callMethod()`. Note in particular that an exception will be raised if `methodname` has not been *exposed* --- see the `exposed` argument to `CreatorMethod `_. `_getValue()` Return a copy of the referent. If the referent is unpicklable then this will raise an exception. `__repr__` Return a representation of the proxy object. `__str__` Return the representation of the referent. Cleanup ======= A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. 
A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Examples ======== An example of the usage of `_callMethod()`:: >>> l = manager.list(range(10)) >>> l._callMethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callMethod('__iter__') # equiv to `iter(l)` >>> l._callMethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range As an example definition of a subclass of BaseProxy, the proxy type used for iterators is the following:: class IteratorProxy(BaseProxy): def __iter__(self): return self def next(self): return self._callMethod('next') .. _Prev: manager-objects.html .. _Up: processing-ref.html .. _Next: pool-objects.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/queue-objects.html000066400000000000000000000227101455552142400247630ustar00rootroot00000000000000 Queue objects
Prev         Up         Next

Queue objects

The queue type provided by processing is a multi-producer, multi-consumer FIFO queue modelled on the Queue.Queue class in the standard library.

Queue(maxsize=0)

Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe.

processing.Queue implements all the methods of Queue.Queue except for qsize(), task_done() and join().

empty()
Return True if the queue is empty, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
full()
Return True if the queue is full, False otherwise. Because of multithreading/multiprocessing semantics, this is not reliable.
put(item, block=True, timeout=None)
Put item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).
put_nowait(item), putNoWait(item)
Equivalent to put(item, False).
get(block=True, timeout=None)
Remove and return an item from the queue. If optional args block is true and timeout is None (the default), block if necessary until an item is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the Empty exception (timeout is ignored in that case).
get_nowait(), getNoWait()
Equivalent to get(False).

processing.Queue has a few additional methods not found in Queue.Queue which are usually unnecessary:

putMany(iterable)
If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So q.putMany(X) is a faster alternative to for x in X: q.put(x). Raises an error if the queue has finite size.
close()
Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected.
joinThread()

This joins the background thread and can only be used after close() has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe.

By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call cancelJoin() to prevent this behaviour.

cancelJoin()
Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue.
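
A hedged sketch of a child that opts out of the automatic join (accepting that buffered data may be lost):

from processing import Process, Queue

def worker(q):
    q.cancelJoin()            # do not wait for the feeder thread at exit
    q.put('X' * 1000000)      # may be lost if the process exits before flushing

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    p.join()                  # cannot deadlock, but the item is not guaranteed to arrive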

Empty and Full

processing uses the usual Queue.Empty and Queue.Full exceptions to signal a timeout. They are not available in the processing namespace so you need to import them from Queue.
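
A small sketch of the import pattern described above (queue size and timeouts are arbitrary):

from Queue import Empty, Full      # the exceptions live in the standard Queue module
from processing import Queue

q = Queue(maxsize=2)
try:
    q.put('task', block=True, timeout=1.0)
except Full:
    print('no free slot became available')
try:
    item = q.get(timeout=0.1)
except Empty:
    item = None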

Warning

If a process is killed using the terminate() method or os.kill() while it is trying to use a Queue then the data in the queue is likely to become corrupted. This may cause other processes to get an exception when they try to use the queue later on.

Warning

As mentioned above, if a child process has put items on a queue (and it has not used cancelJoin()) then that process will not terminate until all buffered items have been flushed to the pipe.

This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children.

Note that a queue created using a manager does not have this issue. See Programming Guidelines.
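
A brief sketch of the managed-queue variant (same producer shape as the deadlock example in Programming Guidelines):

from processing import Process, Manager

def f(q):
    q.put('X' * 1000000)

if __name__ == '__main__':
    manager = Manager()
    q = manager.Queue()          # proxy object: there is no feeder thread in the child
    p = Process(target=f, args=(q,))
    p.start()
    p.join()                     # joining first is safe with a managed queue
    print(q.get())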

uqfoundation-multiprocess-b3457a5/pypy3.9/doc/queue-objects.txt000066400000000000000000000121211455552142400246310ustar00rootroot00000000000000.. include:: header.txt =============== Queue objects =============== The queue type provided by `processing` is a multi-producer, multi-consumer FIFO queue modelled on the `Queue.Queue` class in the standard library. `Queue(maxsize=0)` Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. `Queue.Queue` implements all the methods of `Queue.Queue` except for `qsize()`, `task_done()` and `join()`. `empty()` Return `True` if the queue is empty, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `full()` Return `True` if the queue is full, `False` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. `put(item, block=True, timeout=None)` Put item into the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Full` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the `Full` exception (`timeout` is ignored in that case). `put_nowait(item)`, `putNoWait(item)` Equivalent to `put(item, False)`. `get(block=True, timeout=None)` Remove and return an item from the queue. If optional args `block` is true and `timeout` is `None` (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the `Empty` exception if no item was available within that time. Otherwise (block is false), return an item if one is immediately available, else raise the `Empty` exception (`timeout` is ignored in that case). `get_nowait()`, `getNoWait()` Equivalent to `get(False)`. `processing.Queue` has a few additional methods not found in `Queue.Queue` which are usually unnecessary: `putMany(iterable)` If the queue has infinite size then this adds all items in the iterable to the queue's buffer. So `q.putMany(X)` is a faster alternative to `for x in X: q.put(x)`. Raises an error if the queue has finite size. `close()` Indicates that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. `joinThread()` This joins the background thread and can only be used after `close()` has been called. This blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call `cancelJoin()` to prevent this behaviour. `cancelJoin()` Prevents the background thread from being joined automatically when the process exits. Unnecessary if the current process created the queue. .. admonition:: `Empty` and `Full` `processing` uses the usual `Queue.Empty` and `Queue.Full` exceptions to signal a timeout. They are not available in the `processing` namespace so you need to import them from `Queue`. .. 
warning:: If a process is killed using the `terminate()` method or `os.kill()` while it is trying to use a `Queue` then the data in the queue is likely to become corrupted. This may cause any other processes to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used `cancelJoin()`) then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all it non-daemonic children. Note that a queue created using a manager does not have this issue. See `Programming Guidelines `_. .. _Prev: process-objects.html .. _Up: processing-ref.html .. _Next: connection-objects.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/sharedctypes.html000066400000000000000000000241571455552142400247150ustar00rootroot00000000000000 Shared ctypes objects
Prev         Up         Next

Shared ctypes objects

The processing.sharedctypes module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the ctypes package.)

The functions in the module are

RawArray(typecode_or_type, size_or_initializer)

Returns a ctypes array allocated from shared memory.

typecode_or_type determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the array module. If size_or_initializer is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise size_or_initializer is a sequence which is used to initialize the array and whose length determines the length of the array.

Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock.

RawValue(typecode_or_type, *args)

Returns a ctypes object allocated from shared memory.

typecode_or_type determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the array module. *args is passed on to the constructor for the type.

Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock.

Note that an array of ctypes.c_char has value and raw attributes which allow one to use it to store and retrieve strings -- see documentation for ctypes.

Array(typecode_or_type, size_or_initializer, **, lock=True)

The same as RawArray() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes array.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

Value(typecode_or_type, *args, **, lock=True)

The same as RawValue() except that depending on the value of lock a process-safe synchronization wrapper may be returned instead of a raw ctypes object.

If lock is true (the default) then a new lock object is created to synchronize access to the value. If lock is a Lock or RLock object then that will be used to synchronize access to the value. If lock is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe".

Note that lock is a keyword only argument.

copy(obj)
Returns a ctypes object allocated from shared memory which is a copy of the ctypes object obj.
synchronized(obj, lock=None)

Returns a process-safe wrapper object for a ctypes object which uses lock to synchronize access. If lock is None then a processing.RLock object is created automatically.

A synchronized wrapper will have two methods in addition to those of the object it wraps: getobj() returns the wrapped object and getlock() returns the lock object used for synchronization.

Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object.

Equivalences

The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table MyStruct is some subclass of ctypes.Structure.)

ctypes sharedctypes using type sharedctypes using typecode
c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4)
MyStruct(4, 6) RawValue(MyStruct, 4, 6)  
(c_short * 7)() RawArray(c_short, 7) RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))

Example

Below is an example where a number of ctypes objects are modified by a child process

from processing import Process, Lock
from processing.sharedctypes import Value, Array
from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for p in A:
        p.x **= 2
        p.y **= 2

if __name__ == '__main__':
    lock = Lock()

    n = Value('i', 7)
    x = Value(c_double, 1.0/3.0, lock=False)
    s = Array('c', 'hello world', lock=lock)
    A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print n.value
    print x.value
    print s.value
    print [(p.x, p.y) for p in A]

The results printed are

49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]

Avoid sharing pointers

Although it is possible to store a pointer in shared memory, remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash.

uqfoundation-multiprocess-b3457a5/pypy3.9/doc/sharedctypes.txt000066400000000000000000000143071455552142400245640ustar00rootroot00000000000000.. include:: header.txt ======================== Shared ctypes objects ======================== The `processing.sharedctypes` module provides functions for allocating ctypes objects from shared memory which can be inherited by child processes. (See the standard library's documentation for details of the `ctypes` package.) The functions in the module are `RawArray(typecode_or_type, size_or_initializer)` Returns a ctypes array allocated from shared memory. `typecode_or_type` determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the `array` module. If `size_or_initializer` is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise `size_or_initializer` is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use Array() instead to make sure that access is automatically synchronized using a lock. `RawValue(typecode_or_type, *args)` Returns a ctypes object allocated from shared memory. `typecode_or_type` determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the `array` module. `*args` is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use Value() instead to make sure that access is automatically synchronized using a lock. Note that an array of `ctypes.c_char` has `value` and `rawvalue` attributes which allow one to use it to store and retrieve strings -- see documentation for `ctypes`. `Array(typecode_or_type, size_or_initializer, **, lock=True)` The same as `RawArray()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `Value(typecode_or_type, *args, **, lock=True)` The same as `RawValue()` except that depending on the value of `lock` a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If `lock` is true (the default) then a new lock object is created to synchronize access to the value. If `lock` is a `Lock` or `RLock` object then that will be used to synchronize access to the value. If `lock` is false then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that `lock` is a keyword only argument. `copy(obj)` Returns a ctypes object allocated from shared memory which is a copy of the ctypes object `obj`. `synchronized(obj, lock=None)` Returns a process-safe wrapper object for a ctypes object which uses `lock` to synchronize access. If `lock` is `None` then a `processing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: `getobj()` returns the wrapped object and `getlock()` returns the lock object used for synchronization. 
Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. Equivalences ============ The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table `MyStruct` is some subclass of `ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Example ======= Below is an example where a number of ctypes objects are modified by a child process :: from processing import Process, Lock from processing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for p in A: p.x **= 2 p.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(ctypes.c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print n.value print x.value print s.value print [(p.x, p.y) for p in A] The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. admonition:: Avoid sharing pointers Although it is posible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. _Prev: pool-objects.html .. _Up: processing-ref.html .. _Next: connection-ref.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/tests.html000066400000000000000000000060761455552142400233610ustar00rootroot00000000000000 Tests and Examples
Prev         Up         Next

Tests and Examples

processing contains a test sub-package which contains unit tests for the package. You can do a test run by doing

python -m processing.tests

on Python 2.5 or

python -c "from processing.tests import main; main()"

on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager.

The example sub-package contains the following modules:

ex_newtype.py
Demonstration of how to create and use customized managers and proxies.
ex_pool.py
Test of the Pool class which represents a process pool.
ex_synchronize.py
Test of synchronization types like locks, conditions and queues.
ex_workers.py
A test showing how to use queues to feed tasks to a collection of worker processes and collect the results.
ex_webserver.py
An example of how a pool of worker processes can each run a SimpleHTTPServer.HttpServer instance while sharing a single listening socket.
benchmarks.py
Some simple benchmarks comparing processing with threading.
uqfoundation-multiprocess-b3457a5/pypy3.9/doc/tests.txt000066400000000000000000000027331455552142400232300ustar00rootroot00000000000000.. include:: header.txt Tests and Examples ================== `processing` contains a `test` sub-package which contains unit tests for the package. You can do a test run by doing :: python -m processing.tests on Python 2.5 or :: python -c "from processing.tests import main; main()" on Python 2.4. This will run many of the tests using processes, threads, and processes with a manager. The `example` sub-package contains the following modules: `ex_newtype.py <../examples/ex_newtype.py>`_ Demonstration of how to create and use customized managers and proxies. `ex_pool.py <../examples/ex_pool.py>`_ Test of the `Pool` class which represents a process pool. `ex_synchronize.py <../examples/ex_synchronize.py>`_ Test of synchronization types like locks, conditions and queues. `ex_workers.py <../examples/ex_workers.py>`_ A test showing how to use queues to feed tasks to a collection of worker process and collect the results. `ex_webserver.py <../examples/ex_webserver.py>`_ An example of how a pool of worker processes can each run a `SimpleHTTPServer.HttpServer` instance while sharing a single listening socket. `benchmarks.py <../examples/benchmarks.py>`_ Some simple benchmarks comparing `processing` with `threading`. .. _Prev: programming-guidelines.html .. _Up: index.html .. _Next: tests.html uqfoundation-multiprocess-b3457a5/pypy3.9/doc/version.txt000066400000000000000000000000341455552142400235430ustar00rootroot00000000000000.. |version| replace:: 0.52 uqfoundation-multiprocess-b3457a5/pypy3.9/examples/000077500000000000000000000000001455552142400223715ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/examples/FAILS.txt000066400000000000000000000101261455552142400237700ustar00rootroot00000000000000=== 3.1 --- $ python ex_newtype.py Traceback (most recent call last): File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hashlib.py", line 104, in import _hashlib ImportError: dlopen(/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/lib-dynload/_hashlib.so, 2): Library not loaded: /opt/local/lib/libssl.1.0.0.dylib Referenced from: /opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/lib-dynload/_hashlib.so Reason: image not found During handling of the above exception, another exception occurred: Traceback (most recent call last): File "ex_newtype.py", line 77, in test() File "ex_newtype.py", line 52, in test f1 = manager.Foo1() File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/managers.py", line 669, in temp token, exp = self._create(typeid, *args, **kwds) File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/managers.py", line 567, in _create conn = self._Client(self._address, authkey=self._authkey) File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/connection.py", line 178, in Client answer_challenge(c, authkey) File "/Users/mmckerns/lib/python3.1/site-packages/multiprocess/connection.py", line 418, in answer_challenge digest = hmac.new(authkey, message).digest() File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hmac.py", line 140, in new return HMAC(key, msg, digestmod) File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hmac.py", line 46, in __init__ import hashlib File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hashlib.py", line 135, in md5 = 
__get_builtin_constructor('md5') File "/opt/local/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/hashlib.py", line 62, in __get_builtin_constructor import _md5 ImportError: No module named _md5 $ python ex_pool.py SyntaxError: can not delete variable 'pool' referenced in nested scope === 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8 (with 'fork', 'spawn'+recurse=True) --- $ python ex_pool.py Testing garbage collection: Traceback (most recent call last): File "ex_pool.py", line 295, in test() File "ex_pool.py", line 288, in test assert not worker.is_alive() AssertionError === 3.8 (with 'spawn'+recurse=False) --- $ python ex_pool.py Ordered results using pool.apply_async(): multiprocess.pool.RemoteTraceback: """ Traceback (most recent call last): File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/pool.py", line 125, in worker result = (True, func(*args, **kwds)) File "ex_pool.py", line 16, in calculate result = func(*args) File "ex_pool.py", line 24, in mul time.sleep(0.5*random.random()) NameError: name 'time' is not defined """ The above exception was the direct cause of the following exception: Traceback (most recent call last): File "ex_pool.py", line 295, in test() File "ex_pool.py", line 68, in test print('\t', r.get()) File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/pool.py", line 768, in get raise self._value NameError: name 'time' is not defined $ python ex_synchronize.py 10 Process Process-1: Traceback (most recent call last): File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 313, in _bootstrap self.run() File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 108, in run self._target(*self._args, **self._kwargs) File "ex_synchronize.py", line 17, in value_func random.seed() NameError: name 'random' is not defined $ python ex_workers.py Unordered results: Process Process-1: Traceback (most recent call last): File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 313, in _bootstrap self.run() File "/Users/mmckerns/lib/python3.8/site-packages/multiprocess/process.py", line 108, in run self._target(*self._args, **self._kwargs) File "ex_workers.py", line 23, in worker result = calculate(func, args) NameError: name 'calculate' is not defined uqfoundation-multiprocess-b3457a5/pypy3.9/examples/__init__.py000066400000000000000000000000001455552142400244700ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/examples/benchmarks.py000066400000000000000000000131321455552142400250600ustar00rootroot00000000000000# # Simple benchmarks for the processing package # import time, sys, multiprocess as processing, threading, queue as Queue, gc processing.freezeSupport = processing.freeze_support if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in range(iterations): q.put(a) # q.putMany((a for i in range(iterations)) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print(iterations, 'objects passed through the queue in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 
cond.acquire() cond.notify() cond.release() for i in range(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = processing.Pipe() cond = processing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = processing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print(iterations, 'objects passed through connection in',elapsed,'seconds') print('average number/sec:', iterations/elapsed) #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): a = seq[5] elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in range(iterations): l.acquire() l.release() elapsed = _timer()-t print(iterations, 'iterations in', elapsed, 'seconds') print('average number/sec:', iterations/elapsed) #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in range(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in range(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print(iterations * 2, 'waits in', elapsed, 'seconds') print('average number/sec:', iterations * 2 / elapsed) #### def test(): manager = processing.Manager() gc.disable() print('\n\t######## testing Queue.Queue\n') test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print('\n\t######## testing processing.Queue\n') test_queuespeed(processing.Process, processing.Queue(), processing.Condition()) print('\n\t######## testing Queue managed by server process\n') test_queuespeed(processing.Process, manager.Queue(), manager.Condition()) print('\n\t######## testing processing.Pipe\n') test_pipespeed() print print('\n\t######## testing list\n') test_seqspeed(range(10)) print('\n\t######## testing list managed by server process\n') test_seqspeed(manager.list(range(10))) print('\n\t######## testing Array("i", ..., lock=False)\n') test_seqspeed(processing.Array('i', range(10), lock=False)) print('\n\t######## testing Array("i", ..., lock=True)\n') test_seqspeed(processing.Array('i', range(10), lock=True)) print() print('\n\t######## testing threading.Lock\n') test_lockspeed(threading.Lock()) print('\n\t######## testing threading.RLock\n') test_lockspeed(threading.RLock()) print('\n\t######## testing processing.Lock\n') test_lockspeed(processing.Lock()) print('\n\t######## testing processing.RLock\n') test_lockspeed(processing.RLock()) print('\n\t######## testing lock managed by server process\n') test_lockspeed(manager.Lock()) print('\n\t######## testing rlock managed by server process\n') test_lockspeed(manager.RLock()) print() print('\n\t######## testing threading.Condition\n') test_conditionspeed(threading.Thread, threading.Condition()) print('\n\t######## testing processing.Condition\n') test_conditionspeed(processing.Process, processing.Condition()) print('\n\t######## testing condition managed by a server process\n') test_conditionspeed(processing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': 
processing.freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.9/examples/ex_newtype.py000066400000000000000000000030731455552142400251350ustar00rootroot00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. # from multiprocess import freeze_support as freezeSupport from multiprocess.managers import BaseManager, IteratorProxy as BaseProxy ## class Foo(object): def f(self): print('you called Foo.f()') def g(self): print('you called Foo.g()') def _h(self): print('you called Foo._h()') # A simple generator function def baz(): for i in range(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): def __iter__(self): return self def __next__(self): return self._callmethod('__next__') ## class MyManager(BaseManager): pass # register the Foo class; make all public methods accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make only `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) ## def test(): manager = MyManager() manager.start() print('-' * 20) f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') print('-' * 20) f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') print('-' * 20) it = manager.baz() for i in it: print('<%d>' % i, end=' ') print() ## if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.9/examples/ex_pool.py000066400000000000000000000155061455552142400244170ustar00rootroot00000000000000# # A test of `processing.Pool` class # from multiprocess import Pool, TimeoutError from multiprocess import cpu_count as cpuCount, current_process as currentProcess, freeze_support as freezeSupport, active_children as activeChildren import time, random, sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print('cpuCount() = %d\n' % cpuCount()) # # Create pool # PROCESSES = 4 print('Creating pool with %d processes\n' % PROCESSES) pool = Pool(PROCESSES) # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print('Ordered results using pool.apply_async():') for r in results: print('\t', r.get()) print() print('Ordered results using pool.imap():') for x in imap_it: print('\t', x) print() print('Unordered results using pool.imap_unordered():') for x in imap_unordered_it: print('\t', x) print() print('Ordered results using pool.map() --- will block till complete:') for x in pool.map(calculatestar, TASKS): print('\t', x) print() # # Simple benchmarks # N = 100000 print('def pow3(x): return x**3') t = time.time() A = list(map(pow3, range(N))) print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() B = pool.map(pow3, range(N)) print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \ (N, time.time() - t)) t = time.time() C = list(pool.imap(pow3, 
range(N), chunksize=N//8)) print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() L = [None] * 1000000 print('def noop(x): pass') print('L = [None] * 1000000') t = time.time() A = list(map(noop, L)) print('\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() B = pool.map(noop, L) print('\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t)) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t)) assert A == B == C, (len(A), len(B), len(C)) print() del A, B, C, L # # Test error handling # print('Testing error handling:') try: print(pool.apply(f, (5,))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.apply()') else: raise AssertionError('expected ZeroDivisionError') try: print(pool.map(f, range(10))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from pool.map()') else: raise AssertionError('expected ZeroDivisionError') try: print(list(pool.imap(f, range(10)))) except ZeroDivisionError: print('\tGot ZeroDivisionError as expected from list(pool.imap())') else: raise AssertionError('expected ZeroDivisionError') it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError('expected ZeroDivisionError') assert i == 9 print('\tGot ZeroDivisionError as expected from IMapIterator.next()') print() # # Testing timeouts # print('Testing ApplyResult.get() with timeout:', end='') res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except TimeoutError: sys.stdout.write('.') print() print() print('Testing IMapIterator.next() with timeout:', end='') it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except TimeoutError: sys.stdout.write('.') print() print() # # Testing callback # print('Testing callback:') A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print('\tcallbacks succeeded\n') else: print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print('Testing close():') for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print('\tclose() succeeded\n') # # Check terminate() method # print('Testing terminate():') pool = Pool(2) ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print('\tterminate() succeeded\n') # # Check garbage collection # print('Testing garbage collection:') pool = Pool(2) processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [10]) for i in range(10)] del results, pool time.sleep(0.2) for worker in processes: assert not worker.is_alive() print('\tgarbage collection succeeded\n') if __name__ == '__main__': freezeSupport() test() 
uqfoundation-multiprocess-b3457a5/pypy3.9/examples/ex_synchronize.py000066400000000000000000000144041455552142400260150ustar00rootroot00000000000000# # A test file for the `processing` package # import time, sys, random from queue import Empty import multiprocess as processing # may get overwritten processing.currentProcess = processing.current_process processing.freezeSupport = processing.freeze_support processing.activeChildren = processing.active_children #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print('\n\t\t\t' + str(processing.currentProcess()) + ' has finished') running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = processing.Value('i', TASKS) mutex = processing.Lock() for i in range(TASKS): processing.Process(target=value_func, args=(running, mutex)).start() while running.value > 0: time.sleep(0.08) mutex.acquire() print(running.value, end=' ') sys.stdout.flush() mutex.release() print() print('No more running processes') #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = processing.Queue() p = processing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print(o, end=' ') sys.stdout.flush() except Empty: print('TIMEOUT') print() #### TEST_CONDITION def condition_func(cond): cond.acquire() print('\t' + str(cond)) time.sleep(2) print('\tchild is notifying') print('\t' + str(cond)) cond.notify() cond.release() def test_condition(): cond = processing.Condition() p = processing.Process(target=condition_func, args=(cond,)) print(cond) cond.acquire() print(cond) cond.acquire() print(cond) p.start() print('main is waiting') cond.wait() print('main has woken up') print(cond) cond.release() print(cond) cond.release() p.join() print(cond) #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print(running.value, 'tasks are running') mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print('%s has finished' % processing.currentProcess()) mutex.release() sema.release() def test_semaphore(): sema = processing.Semaphore(3) mutex = processing.RLock() running = processing.Value('i', 0) processes = [ processing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print('\tchild sleeping') time.sleep(5.5) print('\n\tchild terminating') def test_join_timeout(): p = processing.Process(target=join_timeout_func) p.start() print('waiting for process to finish') while 1: p.join(timeout=1) if not p.is_alive(): break print('.', end=' ') sys.stdout.flush() #### TEST_EVENT def event_func(event): print('\t%r is waiting' % processing.currentProcess()) event.wait() print('\t%r has woken up' % processing.currentProcess()) def test_event(): event = processing.Event() processes = [processing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print('main is sleeping') time.sleep(2) print('main is setting event') event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert 
list(a) == sa print('Tests passed') def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [processing.Value(id, v) for id, v in values] shared_arrays = [processing.Array(id, a) for id, a in arrays] p = processing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=processing): global processing processing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print('\n\t######## %s\n' % func.__name__) func() ignore = processing.activeChildren() # cleanup any old processes if hasattr(processing, '_debugInfo'): info = processing._debugInfo() if info: print(info) raise ValueError('there should be no positive refcounts left') if __name__ == '__main__': processing.freezeSupport() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print(' Using processes '.center(79, '-')) namespace = processing elif sys.argv[1] == 'manager': print(' Using processes and a manager '.center(79, '-')) namespace = processing.Manager() namespace.Process = processing.Process namespace.currentProcess = processing.currentProcess namespace.activeChildren = processing.activeChildren elif sys.argv[1] == 'threads': print(' Using threads '.center(79, '-')) import processing.dummy as namespace else: print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]) raise SystemExit(2) test(namespace) uqfoundation-multiprocess-b3457a5/pypy3.9/examples/ex_webserver.py000066400000000000000000000041001455552142400254360ustar00rootroot00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `processing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. 
# import os import sys from multiprocess import Process, current_process as currentProcess, freeze_support as freezeSupport from http.server import HTTPServer from http.server import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocess.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (currentProcess()._name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print('Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)) print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']) os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.9/examples/ex_workers.py000066400000000000000000000042241455552142400251350ustar00rootroot00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). 
# import time import random from multiprocess import current_process as currentProcess, Process, freeze_support as freezeSupport from multiprocess import Queue # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (currentProcess()._name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks list(map(task_queue.put, TASKS1)) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print('Unordered results:') for i in range(len(TASKS1)): print('\t', done_queue.get()) # Add more tasks using `put()` instead of `putMany()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print('\t', done_queue.get()) # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freezeSupport() test() uqfoundation-multiprocess-b3457a5/pypy3.9/index.html000066400000000000000000000117511455552142400225550ustar00rootroot00000000000000 Python processing

Python processing

Author: R Oudkerk
Contact: roudkerk at users.berlios.de
Url: http://developer.berlios.de/projects/pyprocessing
Version: 0.52
Licence: BSD Licence

processing is a package for the Python language which supports the spawning of processes using the API of the standard library's threading module. It runs on both Unix and Windows.

Features:

  • Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
  • Objects can be shared between processes using a server process or (for simple data) shared memory.
  • Equivalents of all the synchronization primitives in threading are available.
  • A Pool class makes it easy to submit tasks to a pool of worker processes.

Examples

The processing.Process class follows the API of threading.Thread. For example

from processing import Process, Queue

def f(q):
    q.put('hello world')

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=[q])
    p.start()
    print q.get()
    p.join()
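
Objects can also be transferred over a pipe rather than a queue, as the feature list above notes. The following is a minimal sketch of the same pattern using Pipe, written against the multiprocess package shipped in this repository (Pipe returns a pair of connected Connection objects):

from multiprocess import Process, Pipe

def f(conn):
    # send any picklable object through the connection
    conn.send('hello over a pipe')
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print(parent_conn.recv())        # prints 'hello over a pipe'
    p.join()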

Synchronization primitives like locks, semaphores and conditions are available, for example

>>> from processing import Condition
>>> c = Condition()
>>> print c
<Condition(<RLock(None, 0)>), 0>
>>> c.acquire()
True
>>> print c
<Condition(<RLock(MainProcess, 1)>), 0>

One can also use a manager to create shared objects either in shared memory or in a server process, for example

>>> from processing import Manager
>>> manager = Manager()
>>> l = manager.list(range(10))
>>> l.reverse()
>>> print l
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> print repr(l)
<Proxy[list] object at 0x00E1B3B0>
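
For simple data, shared memory can be used instead of a server process, as mentioned above. A minimal sketch with Value and Array, written against the multiprocess package shipped in this repository:

from multiprocess import Process, Value, Array

def f(n, a):
    n.value = 3.1415927              # update the shared double
    for i in range(len(a)):
        a[i] = -a[i]                 # negate each element of the shared array

if __name__ == '__main__':
    num = Value('d', 0.0)            # shared double in shared memory
    arr = Array('i', range(10))      # shared array of ints

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print(num.value)                 # 3.1415927
    print(arr[:])                    # [0, -1, -2, ..., -9]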

Tasks can be offloaded to a pool of worker processes in various ways, for example

>>> from processing import Pool
>>> def f(x): return x*x
...
>>> p = Pool(4)
>>> result = p.mapAsync(f, range(10))
>>> print result.get(timeout=1)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
uqfoundation-multiprocess-b3457a5/pypy3.9/module/000077500000000000000000000000001455552142400220405ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/000077500000000000000000000000001455552142400247305ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/__init__.py000066400000000000000000000000001455552142400270270ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/interp_memory.py000066400000000000000000000010051455552142400301670ustar00rootroot00000000000000from rpython.rtyper.lltypesystem import rffi from pypy.interpreter.error import oefmt from pypy.module.mmap.interp_mmap import W_MMap def address_of_buffer(space, w_obj): if space.config.objspace.usemodules.mmap: mmap = space.interp_w(W_MMap, w_obj) address = rffi.cast(rffi.SIZE_T, mmap.mmap.data) return space.newtuple2(space.newint(address), space.newint(mmap.mmap.size)) else: raise oefmt(space.w_TypeError, "cannot get address of buffer") uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/interp_semaphore.py000066400000000000000000000522731455552142400306570ustar00rootroot00000000000000import errno import os import sys import time from rpython.rlib import jit, rgc, rthread from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform as platform from rpython.translator.tool.cbuild import ExternalCompilationInfo from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import GetSetProperty, TypeDef RECURSIVE_MUTEX, SEMAPHORE = range(2) sys_platform = sys.platform if sys.platform == 'win32': from rpython.rlib import rwin32 from pypy.module._multiprocessing.interp_win32_py3 import ( _GetTickCount, handle_w) SEM_VALUE_MAX = int(2**31-1) # max rffi.LONG _CreateSemaphore = rwin32.winexternal( 'CreateSemaphoreA', [rffi.VOIDP, rffi.LONG, rffi.LONG, rwin32.LPCSTR], rwin32.HANDLE, save_err=rffi.RFFI_FULL_LASTERROR) _CloseHandle_no_errno = rwin32.winexternal('CloseHandle', [rwin32.HANDLE], rwin32.BOOL, releasegil=False) _ReleaseSemaphore = rwin32.winexternal( 'ReleaseSemaphore', [rwin32.HANDLE, rffi.LONG, rffi.LONGP], rwin32.BOOL, save_err=rffi.RFFI_SAVE_LASTERROR, releasegil=False) def sem_unlink(name): return None else: from rpython.rlib import rposix if sys.platform == 'darwin': libraries = [] else: libraries = ['rt'] eci = ExternalCompilationInfo( includes = ['sys/time.h', 'limits.h', 'semaphore.h', ], libraries = libraries, ) class CConfig: _compilation_info_ = eci TIMEVAL = platform.Struct('struct timeval', [('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)]) TIMESPEC = platform.Struct('struct timespec', [('tv_sec', rffi.TIME_T), ('tv_nsec', rffi.LONG)]) SEM_FAILED = platform.ConstantInteger('SEM_FAILED') SEM_VALUE_MAX = platform.DefinedConstantInteger('SEM_VALUE_MAX') SEM_TIMED_WAIT = platform.Has('sem_timedwait') SEM_T_SIZE = platform.SizeOf('sem_t') config = platform.configure(CConfig) TIMEVAL = config['TIMEVAL'] TIMESPEC = config['TIMESPEC'] TIMEVALP = rffi.CArrayPtr(TIMEVAL) TIMESPECP = rffi.CArrayPtr(TIMESPEC) SEM_T = rffi.COpaquePtr('sem_t', compilation_info=eci) # rffi.cast(SEM_T, config['SEM_FAILED']) SEM_FAILED = config['SEM_FAILED'] SEM_VALUE_MAX = config['SEM_VALUE_MAX'] if SEM_VALUE_MAX is None: # on Hurd SEM_VALUE_MAX = sys.maxint SEM_TIMED_WAIT = config['SEM_TIMED_WAIT'] 
SEM_T_SIZE = config['SEM_T_SIZE'] if sys.platform == 'darwin': HAVE_BROKEN_SEM_GETVALUE = True else: HAVE_BROKEN_SEM_GETVALUE = False def external(name, args, result, **kwargs): return rffi.llexternal(name, args, result, compilation_info=eci, **kwargs) _sem_open = external('sem_open', [rffi.CCHARP, rffi.INT, rffi.INT, rffi.UINT], SEM_T, save_err=rffi.RFFI_SAVE_ERRNO) # sem_close is releasegil=False to be able to use it in the __del__ _sem_close_no_errno = external('sem_close', [SEM_T], rffi.INT, releasegil=False) _sem_close = external('sem_close', [SEM_T], rffi.INT, releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _sem_wait = external('sem_wait', [SEM_T], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _sem_trywait = external('sem_trywait', [SEM_T], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _sem_post = external('sem_post', [SEM_T], rffi.INT, releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) _sem_getvalue = external('sem_getvalue', [SEM_T, rffi.INTP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _gettimeofday = external('gettimeofday', [TIMEVALP, rffi.VOIDP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) _select = external('select', [rffi.INT, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP, TIMEVALP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) @jit.dont_look_inside def sem_open(name, oflag, mode, value): res = _sem_open(name, oflag, mode, value) if res == rffi.cast(SEM_T, SEM_FAILED): raise OSError(rposix.get_saved_errno(), "sem_open failed") return res def sem_close(handle): res = _sem_close(handle) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_close failed") def sem_unlink(name): res = _sem_unlink(name) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_unlink failed") def sem_wait(sem): res = _sem_wait(sem) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_wait failed") def sem_trywait(sem): res = _sem_trywait(sem) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_trywait failed") def sem_timedwait(sem, deadline): res = _sem_timedwait(sem, deadline) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_timedwait failed") def _sem_timedwait_save(sem, deadline): delay = 0 void = lltype.nullptr(rffi.VOIDP.TO) with lltype.scoped_alloc(TIMEVALP.TO, 1) as tvdeadline: while True: # poll if _sem_trywait(sem) == 0: return 0 elif rposix.get_saved_errno() != errno.EAGAIN: return -1 now = gettimeofday() c_tv_sec = rffi.getintfield(deadline[0], 'c_tv_sec') c_tv_nsec = rffi.getintfield(deadline[0], 'c_tv_nsec') if (c_tv_sec < now[0] or (c_tv_sec == now[0] and c_tv_nsec <= now[1])): rposix.set_saved_errno(errno.ETIMEDOUT) return -1 # calculate how much time is left difference = ((c_tv_sec - now[0]) * 1000000 + (c_tv_nsec - now[1])) # check delay not too long -- maximum is 20 msecs if delay > 20000: delay = 20000 if delay > difference: delay = difference delay += 1000 # sleep rffi.setintfield(tvdeadline[0], 'c_tv_sec', delay / 1000000) rffi.setintfield(tvdeadline[0], 'c_tv_usec', delay % 1000000) if _select(0, void, void, void, tvdeadline) < 0: return -1 if SEM_TIMED_WAIT: _sem_timedwait = external('sem_timedwait', [SEM_T, TIMESPECP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) else: _sem_timedwait = _sem_timedwait_save def sem_post(sem): res = _sem_post(sem) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_post failed") def sem_getvalue(sem): sval_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: res = _sem_getvalue(sem, sval_ptr) if res < 0: raise OSError(rposix.get_saved_errno(), "sem_getvalue 
failed") return rffi.cast(lltype.Signed, sval_ptr[0]) finally: lltype.free(sval_ptr, flavor='raw') def gettimeofday(): now = lltype.malloc(TIMEVALP.TO, 1, flavor='raw') try: res = _gettimeofday(now, None) if res < 0: raise OSError(rposix.get_saved_errno(), "gettimeofday failed") return (rffi.getintfield(now[0], 'c_tv_sec'), rffi.getintfield(now[0], 'c_tv_usec')) finally: lltype.free(now, flavor='raw') def handle_w(space, w_handle): return rffi.cast(SEM_T, space.int_w(w_handle)) # utilized by POSIX and win32 def semaphore_unlink(space, w_name): name = space.text_w(w_name) try: sem_unlink(name) except OSError as e: raise wrap_oserror(space, e) class CounterState: def __init__(self, space): self.counter = 0 def _cleanup_(self): self.counter = 0 def getCount(self): value = self.counter self.counter += 1 return value # These functions may raise bare OSError or WindowsError, # don't forget to wrap them into OperationError if sys.platform == 'win32': def create_semaphore(space, name, val, max): rwin32.SetLastError_saved(0) handle = _CreateSemaphore(rffi.NULL, val, max, rffi.NULL) # On Windows we should fail on ERROR_ALREADY_EXISTS err = rwin32.GetLastError_saved() if err != 0: raise WindowsError(err, "CreateSemaphore") return handle def delete_semaphore(handle): _CloseHandle_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: full_msecs = 0 elif space.is_none(w_timeout): full_msecs = rwin32.INFINITE else: timeout = space.float_w(w_timeout) timeout *= 1000.0 if timeout < 0.0: timeout = 0.0 elif timeout >= 0.5 * rwin32.INFINITE: # 25 days raise oefmt(space.w_OverflowError, "timeout is too large") full_msecs = r_uint(int(timeout + 0.5)) # check whether we can acquire without blocking res = rwin32.WaitForSingleObject(self.handle, 0) if res != rwin32.WAIT_TIMEOUT: self.last_tid = rthread.get_ident() self.count += 1 return True msecs = full_msecs start = _GetTickCount() while True: from pypy.module.time.interp_time import State interrupt_event = space.fromcache(State).get_interrupt_event() handles = [self.handle, interrupt_event] # do the wait rwin32.ResetEvent(interrupt_event) res = rwin32.WaitForMultipleObjects(handles, timeout=msecs) if res != rwin32.WAIT_OBJECT_0 + 1: break # got SIGINT so give signal handler a chance to run time.sleep(0.001) # if this is main thread let KeyboardInterrupt be raised _check_signals(space) # recalculate timeout if msecs != rwin32.INFINITE: ticks = _GetTickCount() if r_uint(ticks - start) >= full_msecs: return False msecs = full_msecs - r_uint(ticks - start) # handle result if res != rwin32.WAIT_TIMEOUT: self.last_tid = rthread.get_ident() self.count += 1 return True return False def semlock_release(self, space): if not _ReleaseSemaphore(self.handle, 1, lltype.nullptr(rffi.LONGP.TO)): err = rwin32.GetLastError_saved() if err == 0x0000012a: # ERROR_TOO_MANY_POSTS raise oefmt(space.w_ValueError, "semaphore or lock released too many times") else: raise WindowsError(err, "ReleaseSemaphore") def semlock_getvalue(self, space): if rwin32.WaitForSingleObject(self.handle, 0) == rwin32.WAIT_TIMEOUT: return 0 previous_ptr = lltype.malloc(rffi.LONGP.TO, 1, flavor='raw') try: if not _ReleaseSemaphore(self.handle, 1, previous_ptr): raise rwin32.lastSavedWindowsError("ReleaseSemaphore") return intmask(previous_ptr[0]) + 1 finally: lltype.free(previous_ptr, flavor='raw') def semlock_iszero(self, space): return semlock_getvalue(self, space) == 0 else: def create_semaphore(space, name, val, max): sem = sem_open(name, os.O_CREAT | os.O_EXCL, 0600, val) 
rgc.add_memory_pressure(SEM_T_SIZE) return sem def reopen_semaphore(name): sem = sem_open(name, 0, 0600, 0) rgc.add_memory_pressure(SEM_T_SIZE) return sem def delete_semaphore(handle): _sem_close_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: deadline = lltype.nullptr(TIMESPECP.TO) elif space.is_none(w_timeout): deadline = lltype.nullptr(TIMESPECP.TO) else: timeout = space.float_w(w_timeout) sec = int(timeout) nsec = int(1e9 * (timeout - sec) + 0.5) now_sec, now_usec = gettimeofday() deadline = lltype.malloc(TIMESPECP.TO, 1, flavor='raw') rffi.setintfield(deadline[0], 'c_tv_sec', now_sec + sec) rffi.setintfield(deadline[0], 'c_tv_nsec', now_usec * 1000 + nsec) val = (rffi.getintfield(deadline[0], 'c_tv_sec') + rffi.getintfield(deadline[0], 'c_tv_nsec') / 1000000000) rffi.setintfield(deadline[0], 'c_tv_sec', val) val = rffi.getintfield(deadline[0], 'c_tv_nsec') % 1000000000 rffi.setintfield(deadline[0], 'c_tv_nsec', val) try: while True: try: if not block: sem_trywait(self.handle) elif not deadline: sem_wait(self.handle) else: sem_timedwait(self.handle, deadline) except OSError as e: if e.errno == errno.EINTR: # again _check_signals(space) continue elif e.errno in (errno.EAGAIN, errno.ETIMEDOUT): return False raise _check_signals(space) self.last_tid = rthread.get_ident() self.count += 1 return True finally: if deadline: lltype.free(deadline, flavor='raw') def semlock_release(self, space): if self.kind == RECURSIVE_MUTEX: sem_post(self.handle) return if HAVE_BROKEN_SEM_GETVALUE: # We will only check properly the maxvalue == 1 case if self.maxvalue == 1: # make sure that already locked try: sem_trywait(self.handle) except OSError as e: if e.errno != errno.EAGAIN: raise # it is already locked as expected else: # it was not locked so undo wait and raise sem_post(self.handle) raise oefmt(space.w_ValueError, "semaphore or lock released too many times") else: # This check is not an absolute guarantee that the semaphore does # not rise above maxvalue. 
if sem_getvalue(self.handle) >= self.maxvalue: raise oefmt(space.w_ValueError, "semaphore or lock released too many times") sem_post(self.handle) def semlock_getvalue(self, space): if HAVE_BROKEN_SEM_GETVALUE: raise oefmt(space.w_NotImplementedError, "sem_getvalue is not implemented on this system") else: val = sem_getvalue(self.handle) # some posix implementations use negative numbers to indicate # the number of waiting threads if val < 0: val = 0 return val def semlock_iszero(self, space): if HAVE_BROKEN_SEM_GETVALUE: try: sem_trywait(self.handle) except OSError as e: if e.errno != errno.EAGAIN: raise return True else: sem_post(self.handle) return False else: return semlock_getvalue(self, space) == 0 class W_SemLock(W_Root): def __init__(self, space, handle, kind, maxvalue, name): self.handle = handle self.kind = kind self.count = 0 self.maxvalue = maxvalue self.register_finalizer(space) self.last_tid = -1 self.name = name def name_get(self, space): if self.name is None: return space.w_None return space.newtext(self.name) def kind_get(self, space): return space.newint(self.kind) def maxvalue_get(self, space): return space.newint(self.maxvalue) def handle_get(self, space): h = rffi.cast(rffi.INTPTR_T, self.handle) return space.newint(h) def get_count(self, space): return space.newint(self.count) def _ismine(self): return self.count > 0 and rthread.get_ident() == self.last_tid def is_mine(self, space): return space.newbool(self._ismine()) def is_zero(self, space): try: res = semlock_iszero(self, space) except OSError as e: raise wrap_oserror(space, e) return space.newbool(res) def get_value(self, space): try: val = semlock_getvalue(self, space) except OSError as e: raise wrap_oserror(space, e) return space.newint(val) @unwrap_spec(block=bool) def acquire(self, space, block=True, w_timeout=None): # check whether we already own the lock if self.kind == RECURSIVE_MUTEX and self._ismine(): self.count += 1 return space.w_True try: # sets self.last_tid and increments self.count # those steps need to be as close as possible to # acquiring the semlock for self._ismine() to support # multiple threads got = semlock_acquire(self, space, block, w_timeout) except OSError as e: raise wrap_oserror(space, e) if got: return space.w_True else: return space.w_False def release(self, space): if self.kind == RECURSIVE_MUTEX: if not self._ismine(): raise oefmt(space.w_AssertionError, "attempt to release recursive lock not owned by " "thread") if self.count > 1: self.count -= 1 return try: # Note: a succesful semlock_release() must not release the GIL, # otherwise there is a race condition on self.count semlock_release(self, space) self.count -= 1 except OSError as e: raise wrap_oserror(space, e) def after_fork(self): self.count = 0 @unwrap_spec(kind=int, maxvalue=int, name='text_or_none') def rebuild(space, w_cls, w_handle, kind, maxvalue, name): # if sys_platform != 'win32' and name is not None: # like CPython, in this case ignore 'w_handle' try: handle = reopen_semaphore(name) except OSError as e: raise wrap_oserror(space, e) else: handle = handle_w(space, w_handle) # self = space.allocate_instance(W_SemLock, w_cls) self.__init__(space, handle, kind, maxvalue, name) return self def enter(self, space): return self.acquire(space, w_timeout=space.w_None) def exit(self, space, __args__): self.release(space) def _finalize_(self): delete_semaphore(self.handle) @unwrap_spec(kind=int, value=int, maxvalue=int, name='text', unlink=int) def descr_new(space, w_subtype, kind, value, maxvalue, name, unlink): if kind != 
RECURSIVE_MUTEX and kind != SEMAPHORE: raise oefmt(space.w_ValueError, "unrecognized kind") counter = space.fromcache(CounterState).getCount() try: handle = create_semaphore(space, name, value, maxvalue) if unlink: sem_unlink(name) name = None except OSError as e: raise wrap_oserror(space, e) self = space.allocate_instance(W_SemLock, w_subtype) self.__init__(space, handle, kind, maxvalue, name) return self W_SemLock.typedef = TypeDef( "SemLock", __new__ = interp2app(descr_new), kind = GetSetProperty(W_SemLock.kind_get), maxvalue = GetSetProperty(W_SemLock.maxvalue_get), handle = GetSetProperty(W_SemLock.handle_get), name = GetSetProperty(W_SemLock.name_get), _count = interp2app(W_SemLock.get_count), _is_mine = interp2app(W_SemLock.is_mine), _is_zero = interp2app(W_SemLock.is_zero), _get_value = interp2app(W_SemLock.get_value), acquire = interp2app(W_SemLock.acquire), release = interp2app(W_SemLock.release), _rebuild = interp2app(W_SemLock.rebuild.im_func, as_classmethod=True), _after_fork = interp2app(W_SemLock.after_fork), __enter__=interp2app(W_SemLock.enter), __exit__=interp2app(W_SemLock.exit), SEM_VALUE_MAX=SEM_VALUE_MAX, ) def _check_signals(space): space.getexecutioncontext().checksignals() uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/interp_win32_py3.py000066400000000000000000000032231455552142400304200ustar00rootroot00000000000000from rpython.rtyper.lltypesystem import rffi from rpython.rlib._rsocket_rffi import socketclose, geterrno, socketrecv, send from rpython.rlib import rwin32 from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec def getWindowsError(space): errno = geterrno() message = rwin32.FormatErrorW(errno) w_errcode = space.newint(errno) return OperationError(space.w_WindowsError, space.newtuple([w_errcode, space.newtext(*message), space.w_None, w_errcode])) @unwrap_spec(handle=int) def multiprocessing_closesocket(space, handle): res = socketclose(handle) if res != 0: raise getWindowsError(space) @unwrap_spec(handle=int, buffersize=int) def multiprocessing_recv(space, handle, buffersize): with rffi.scoped_alloc_buffer(buffersize) as buf: read_bytes = socketrecv(handle, buf.raw, buffersize, 0) if read_bytes >= 0: return space.newbytes(buf.str(read_bytes)) raise getWindowsError(space) @unwrap_spec(handle=int, data='bufferstr') def multiprocessing_send(space, handle, data): if data is None: raise OperationError(space.w_ValueError, 'data cannot be None') with rffi.scoped_nonmovingbuffer(data) as dataptr: # rsocket checks for writability of socket with wait_for_data, cpython does check res = send(handle, dataptr, len(data), 0) if res < 0: raise getWindowsError(space) return space.newint(res) def handle_w(space, w_handle): return rffi.cast(rwin32.HANDLE, space.int_w(w_handle)) _GetTickCount = rwin32.winexternal( 'GetTickCount', [], rwin32.DWORD) uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/moduledef.py000066400000000000000000000010531455552142400272450ustar00rootroot00000000000000import sys from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): interpleveldefs = { 'SemLock' : 'interp_semaphore.W_SemLock', } appleveldefs = { } if sys.platform == 'win32': interpleveldefs['closesocket'] = 'interp_win32_py3.multiprocessing_closesocket' interpleveldefs['recv'] = 'interp_win32_py3.multiprocessing_recv' interpleveldefs['send'] = 'interp_win32_py3.multiprocessing_send' interpleveldefs['sem_unlink'] = 'interp_semaphore.semaphore_unlink' 
uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/test/000077500000000000000000000000001455552142400257075ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/test/__init__.py000066400000000000000000000000001455552142400300060ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/test/test_interp_semaphore.py000066400000000000000000000042471455552142400326730ustar00rootroot00000000000000import pytest import time import sys from rpython.rlib.rgil import yield_thread from pypy.interpreter.gateway import interp2app from pypy.module.thread.os_lock import _set_sentinel from pypy.module.thread.os_thread import start_new_thread from pypy.module._multiprocessing.interp_semaphore import ( create_semaphore, delete_semaphore, W_SemLock, sem_unlink) @pytest.mark.skipif(sys.platform == 'win32', reason='hangs on win32') @pytest.mark.parametrize('spaceconfig', [ {'usemodules': ['_multiprocessing', 'thread']}]) @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semlock_release(space): # trigger the setup() code in time.moduledef space.getbuiltinmodule('time') sem_name = '/test8' _handle = create_semaphore(space, sem_name, 1, 1) try: sem_unlink(sem_name) w_lock = W_SemLock(space, _handle, 0, 1, None) created = [] successful = [] N_THREADS = 16 def run(space): w_sentinel = _set_sentinel(space) yield_thread() w_sentinel.descr_lock_acquire(space) # releases GIL try: yield_thread() created.append(w_sentinel) w_got = w_lock.acquire(space, w_timeout=space.newfloat(5.)) # releases GIL if space.is_true(w_got): yield_thread() w_lock.release(space) successful.append(w_sentinel) except: import traceback traceback.print_exc() raise w_run = space.wrap(interp2app(run)) w_lock.acquire(space) for _ in range(N_THREADS): start_new_thread(space, w_run, space.newtuple([])) # releases GIL deadline = time.time() + 5. 
while len(created) < N_THREADS: assert time.time() < deadline yield_thread() w_lock.release(space) for w_sentinel in created: # Join thread w_sentinel.descr_lock_acquire(space) # releases GIL w_sentinel.descr_lock_release(space) assert len(successful) == N_THREADS finally: delete_semaphore(_handle) uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/test/test_semaphore.py000066400000000000000000000147421455552142400313130ustar00rootroot00000000000000import py import sys import pytest from pypy.module._multiprocessing.interp_semaphore import ( RECURSIVE_MUTEX, SEMAPHORE) class AppTestSemaphore: spaceconfig = dict(usemodules=('_multiprocessing', 'thread', 'signal', 'select', 'binascii', 'struct', '_posixsubprocess')) if sys.platform == 'win32': spaceconfig['usemodules'] += ('_rawffi', '_cffi_backend') else: spaceconfig['usemodules'] += ('fcntl',) def setup_class(cls): cls.w_SEMAPHORE = cls.space.wrap(SEMAPHORE) cls.w_RECURSIVE = cls.space.wrap(RECURSIVE_MUTEX) cls.w_runappdirect = cls.space.wrap(cls.runappdirect) @py.test.mark.skipif("sys.platform == 'win32'") def test_sem_unlink(self): from _multiprocessing import sem_unlink import errno try: sem_unlink("non-existent") except OSError as e: assert e.errno in (errno.ENOENT, errno.EINVAL) else: assert 0, "should have raised" def test_semaphore_basic(self): from _multiprocessing import SemLock import sys assert SemLock.SEM_VALUE_MAX > 10 kind = self.SEMAPHORE value = 1 maxvalue = 1 # the following line gets OSError: [Errno 38] Function not implemented # if /dev/shm is not mounted on Linux sem = SemLock(kind, value, maxvalue, "1", unlink=True) assert sem.kind == kind assert sem.maxvalue == maxvalue assert isinstance(sem.handle, int) assert sem.name is None assert sem._count() == 0 if sys.platform == 'darwin': raises(NotImplementedError, 'sem._get_value()') else: assert sem._get_value() == 1 assert sem._is_zero() == False sem.acquire() assert sem._is_mine() assert sem._count() == 1 if sys.platform == 'darwin': raises(NotImplementedError, 'sem._get_value()') else: assert sem._get_value() == 0 assert sem._is_zero() == True sem.release() assert sem._count() == 0 sem.acquire() sem._after_fork() assert sem._count() == 0 @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_recursive(self): from _multiprocessing import SemLock kind = self.RECURSIVE value = 1 maxvalue = 1 # the following line gets OSError: [Errno 38] Function not implemented # if /dev/shm is not mounted on Linux sem = SemLock(kind, value, maxvalue, "2", unlink=True) sem.acquire() sem.release() assert sem._count() == 0 sem.acquire() sem.release() # now recursively sem.acquire() sem.acquire() assert sem._count() == 2 sem.release() sem.release() @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_maxvalue(self): from _multiprocessing import SemLock import sys kind = self.SEMAPHORE value = SemLock.SEM_VALUE_MAX maxvalue = SemLock.SEM_VALUE_MAX sem = SemLock(kind, value, maxvalue, "3.0", unlink=True) for i in range(10): res = sem.acquire() assert res == True assert sem._count() == i+1 if sys.platform != 'darwin': assert sem._get_value() == maxvalue - (i+1) value = 0 maxvalue = SemLock.SEM_VALUE_MAX sem = SemLock(kind, value, maxvalue, "3.1", unlink=True) for i in range(10): sem.release() assert sem._count() == -(i+1) if sys.platform != 'darwin': assert sem._get_value() == i+1 @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_wait(self): from _multiprocessing import SemLock 
kind = self.SEMAPHORE value = 1 maxvalue = 1 sem = SemLock(kind, value, maxvalue, "3", unlink=True) res = sem.acquire() assert res == True res = sem.acquire(timeout=0.1) assert res == False @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_rebuild(self): import sys if sys.platform == 'win32': from _multiprocessing import SemLock def sem_unlink(*args): pass else: from _multiprocessing import SemLock, sem_unlink kind = self.SEMAPHORE value = 1 maxvalue = 1 sem = SemLock(kind, value, maxvalue, "4.2", unlink=False) try: sem2 = SemLock._rebuild(-1, kind, value, "4.2") #assert sem.handle != sem2.handle---even though they come # from different calls to sem_open(), on Linux at least, # they are the same pointer sem2 = SemLock._rebuild(sem.handle, kind, value, None) assert sem.handle == sem2.handle finally: sem_unlink("4.2") @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_semaphore_contextmanager(self): from _multiprocessing import SemLock kind = self.SEMAPHORE value = 1 maxvalue = 1 sem = SemLock(kind, value, maxvalue, "5", unlink=True) with sem: assert sem._count() == 1 assert sem._count() == 0 def test_unlink(self): from _multiprocessing import SemLock sem = SemLock(self.SEMAPHORE, 1, 1, '/mp-123', unlink=True) assert sem._count() == 0 @pytest.mark.skipif(sys.platform == 'darwin', reason="Hangs on macOSX") def test_in_threads(self): from _multiprocessing import SemLock from threading import Thread from time import sleep l = SemLock(0, 1, 1, "6", unlink=True) if self.runappdirect: def f(id): for i in range(10000): pass else: def f(id): for i in range(1000): # reduce the probability of thread switching # at exactly the wrong time in semlock_acquire for j in range(10): pass threads = [Thread(None, f, args=(i,)) for i in range(2)] [t.start() for t in threads] # if the RLock calls to sem_wait and sem_post do not match, # one of the threads will block and the call to join will fail [t.join() for t in threads] uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/test/test_win32.py000066400000000000000000000016211455552142400302620ustar00rootroot00000000000000import py import sys @py.test.mark.skipif('sys.platform != "win32"') class AppTestWin32: spaceconfig = dict(usemodules=('_multiprocessing', '_cffi_backend', 'signal', '_rawffi', 'binascii', '_socket', 'select')) def setup_class(cls): # import here since importing _multiprocessing imports multiprocessing # (in interp_connection) to get the BufferTooShort exception, which on # win32 imports msvcrt which imports via cffi which allocates ccharp # that are never released. 
This trips up the LeakChecker if done in a # test function cls.w_multiprocessing = cls.space.appexec([], '(): import multiprocessing as m; return m') def test_closesocket(self): from _multiprocessing import closesocket raises(WindowsError, closesocket, -1) uqfoundation-multiprocess-b3457a5/pypy3.9/module/_multiprocess/test/test_ztranslation.py000066400000000000000000000001651455552142400320520ustar00rootroot00000000000000from pypy.objspace.fake.checkmodule import checkmodule def test_checkmodule(): checkmodule('_multiprocessing') uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/000077500000000000000000000000001455552142400233045ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/__init__.py000066400000000000000000000035001455552142400254130ustar00rootroot00000000000000# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Original: Copyright (c) 2006-2008, R Oudkerk # Original: Licensed to PSF under a Contributor Agreement. # Forked by Mike McKerns, to support enhanced serialization. # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) sys.path.append(root) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(root, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(root, 'README.md')) del os, sys, root, get_license_text, get_readme_as_rst import sys from . import context # # Copy stuff from default context # __all__ = [x for x in dir(context._default_context) if not x.startswith('_')] globals().update((name, getattr(context._default_context, name)) for name in __all__) # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def license(): """print license""" print (__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/connection.py000066400000000000000000000761431455552142400260300ustar00rootroot00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] import io import os import sys import socket import struct import time import tempfile import itertools try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import util from . import AuthenticationError, BufferTooShort from .context import reduction _ForkingPickler = reduction.ForkingPickler try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE except ImportError: if sys.platform == 'win32': raise _winapi = None # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... 
CONNECTION_TIMEOUT = 20. _mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return getattr(time,'monotonic',time.time)() + timeout def _check_timeout(t): return getattr(time,'monotonic',time.time)() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter)), dir="") else: raise ValueError('unrecognized family') def _validate_family(family): ''' Checks if the family is valid for the current environment. ''' if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) if sys.platform == 'win32' and family == 'AF_UNIX': # double check if not hasattr(socket, family): raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str or util.is_abstract_socket_namespace(address): return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _ConnectionBase: _handle = None def __init__(self, handle, readable=True, writable=True): handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) # HACK for byte-indexing of non-bytewise buffers (e.g. 
array.array) if m.itemsize > 1: m = memoryview(bytes(m)) n = len(m) if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(_ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. """ self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[offset // itemsize : (offset + size) // itemsize]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return _ForkingPickler.loads(buf.getbuffer()) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. 
""" _got_empty_message = False def _close(self, _CloseHandle=_winapi.CloseHandle): _CloseHandle(self._handle) def _send_bytes(self, buf): ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nwritten, err = ov.GetOverlappedResult(True) assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile(self._handle, bsize, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). """ if _winapi: def _close(self, _close=_multiprocessing.closesocket): _close(self._handle) _write = _multiprocessing.send _read = _multiprocessing.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: n = write(self._handle, buf) remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: chunk = read(handle, remaining) n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf): n = len(buf) if n > 0x7fffffff: pre_header = struct.pack("!i", -1) header = struct.pack("!Q", n) self._send(pre_header) self._send(header) self._send(buf) else: # For wire compatibility with 3.7 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. 
self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if size == -1: buf = self._recv(8) size, = struct.unpack("!Q", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = family or (address and address_type(address)) \ or default_family address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. ''' listener = self._listener if listener is not None: self._listener = None listener.close() @property def address(self): return self._listener._address @property def last_accepted(self): return self._listener._last_accepted def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(True) s2.setblocking(True) c1 = Connection(s1.detach()) c2 = Connection(s2.detach()) else: fd1, fd2 = os.pipe() c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True): ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = 
overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): # Linux abstract socket namespaces do not need to be explicitly unlinked self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() s.setblocking(True) return Connection(s.detach()) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) with socket.socket( getattr(socket, family) ) as s: s.setblocking(True) s.connect(address) return Connection(s.detach()) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: res = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = b'#CHALLENGE#' WELCOME = b'#WELCOME#' FAILURE = b'#FAILURE#' def deliver_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message, 'md5').digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac if not isinstance(authkey, bytes): raise ValueError( "Authkey must be bytes, not {0!s}".format(type(authkey))) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message, 'md5').digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signalled. (Only # returning the first signalled might create starvation issues.) 
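# --- Editorial usage sketch (not part of the original module) --------------
# The deliver_challenge()/answer_challenge() helpers above implement a simple
# HMAC-MD5 handshake over a Connection.  A self-contained illustration using
# both ends of a local Pipe, with one side driven from a thread because
# deliver_challenge() blocks waiting for the digest; names are illustrative.
def _example_auth_handshake():
    import threading
    from multiprocess.connection import Pipe, deliver_challenge, answer_challenge

    server_end, client_end = Pipe()
    key = b'not-so-secret'
    t = threading.Thread(target=deliver_challenge, args=(server_end, key))
    t.start()
    answer_challenge(client_end, key)   # raises AuthenticationError on a bad key
    t.join()                            # both ends agreed on the key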
L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res+1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: ov, err = None, e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed... if ov and sys.getwindowsversion()[:2] >= (6, 2): # ... except on Windows 8 and later, where # the message HAS been consumed. try: _, err = ov.GetOverlappedResult(False) except OSError as e: err = e.winerror if not err and hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [o for o in object_list if o in ready_objects] else: import selectors # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). if hasattr(selectors, 'PollSelector'): _WaitSelector = selectors.PollSelector else: _WaitSelector = selectors.SelectSelector def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' with _WaitSelector() as selector: for obj in object_list: selector.register(obj, selectors.EVENT_READ) if timeout is not None: deadline = getattr(time,'monotonic',time.time)() + timeout while True: ready = selector.select(timeout) if ready: return [key.fileobj for (key, events) in ready] else: if timeout is not None: timeout = deadline - getattr(time,'monotonic',time.time)() if timeout < 0: return ready # # Make connection and socket objects sharable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . 
import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(sock.detach(), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): handle = dh.detach() return PipeConnection(handle, readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): fd = df.detach() return Connection(fd, readable, writable) reduction.register(Connection, reduce_connection) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/context.py000066400000000000000000000260061455552142400253460ustar00rootroot00000000000000import os import sys import threading from . import process from . import reduction __all__ = () # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError current_process = staticmethod(process.current_process) parent_process = staticmethod(process.parent_process) active_children = staticmethod(process.active_children) def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, lock=True): '''Returns a synchronized shared object''' from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *, lock=True): '''Returns a synchronized shared array''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . 
import connection def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. ''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) from None ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method, force=False): raise ValueError('cannot set start method of concrete context') @property def reducer(self): '''Controls how objects will be reduced to a form that can be shared with other processes.''' return globals().get('reduction') @reducer.setter def reducer(self, reduction): globals()['reduction'] = reduction def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super().get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): if sys.platform == 'win32': return ['spawn'] else: methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] if reduction.HAVE_SEND_HANDLE: methods.append('forkserver') return methods # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): if not reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } if sys.platform == 'darwin': # bpo-33725: running 
arbitrary code after fork() is no longer reliable # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn else: _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/dummy/000077500000000000000000000000001455552142400244375ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/dummy/__init__.py000066400000000000000000000057651455552142400265650ustar00rootroot00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): if self._parent is not current_process(): raise RuntimeError( "Parent is {0!r} but current_process is {1!r}".format( self._parent, current_process())) self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def 
__init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/dummy/connection.py000066400000000000000000000030761455552142400271560ustar00rootroot00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Client', 'Listener', 'Pipe' ] from queue import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None @property def address(self): return self._backlog_queue def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False with self._in.not_empty: self._in.not_empty.wait(timeout) return self._in.qsize() > 0 def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/forkserver.py000066400000000000000000000275531455552142400260620ustar00rootroot00000000000000import errno import os import selectors import signal import socket import struct import sys import threading import warnings from . import connection from . import process from .context import reduction from . import resource_tracker from . import spawn from . 
import util __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def _stop(self): # Method used by unit tests to stop the server with self._lock: self._stop_unlocked() def _stop_unlocked(self): if self._forkserver_pid is None: return # close the "alive" file descriptor asks the server to stop os.close(self._forkserver_alive_fd) self._forkserver_alive_fd = None os.waitpid(self._forkserver_pid, 0) self._forkserver_pid = None if not util.is_abstract_socket_namespace(self._forkserver_address): os.unlink(self._forkserver_address) self._forkserver_address = None def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in self._preload_modules): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. ''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, resource_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. ''' with self._lock: resource_tracker.ensure_running() if self._forkserver_pid is not None: # forkserver was launched before, is it still running? pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) if not pid: # still alive return # dead, launch it again os.close(self._forkserver_alive_fd) self._forkserver_address = None self._forkserver_alive_fd = None self._forkserver_pid = None cmd = ('from multiprocess.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = {x: y for x, y in data.items() if x in desired_keys} else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) if not util.is_abstract_socket_namespace(address): os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. 
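# --- Editorial usage sketch (not part of the original module) --------------
# ensure_running()/connect_to_new_process() are not called directly by user
# code; they are reached by selecting the 'forkserver' start method.  A
# minimal, POSIX-only sketch (the method is unavailable on Windows and when
# reduction.HAVE_SEND_HANDLE is false); names below are illustrative.
def _example_forkserver_start_method():
    import multiprocess as mp

    ctx = mp.get_context('forkserver')     # raises ValueError if unavailable
    with ctx.Pool(processes=2) as pool:    # first Process/Pool use launches the fork server
        print(pool.map(abs, [-1, -2, 3]))  # -> [1, 2, 3]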
alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] pid = util.spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w self._forkserver_pid = pid # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass util._close_stdin() sig_r, sig_w = os.pipe() os.set_blocking(sig_r, False) os.set_blocking(sig_w, False) def sigchld_handler(*_unused): # Dummy signal handler, doesn't do anything pass handlers = { # unblocking SIGCHLD allows the wakeup fd to notify our event loop signal.SIGCHLD: sigchld_handler, # protect the process from ^C signal.SIGINT: signal.SIG_IGN, } old_handlers = {sig: signal.signal(sig, val) for (sig, val) in handlers.items()} # calling os.write() in the Python signal handler is racy signal.set_wakeup_fd(sig_w) # map child pids to client fds pid_to_fd = {} with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) selector.register(sig_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: # EOF because no more client processes left assert os.read(alive_r, 1) == b'', "Not at EOF?" 
raise SystemExit if sig_r in rfds: # Got SIGCHLD os.read(sig_r, 65536) # exhaust while True: # Scan for child processes try: pid, sts = os.waitpid(-1, os.WNOHANG) except ChildProcessError: break if pid == 0: break child_w = pid_to_fd.pop(pid, None) if child_w is not None: returncode = os.waitstatus_to_exitcode(sts) # Send exit code to client process try: write_signed(child_w, returncode) except BrokenPipeError: # client vanished pass os.close(child_w) else: # This shouldn't happen really warnings.warn('forkserver: waitpid returned ' 'unexpected pid %d' % pid) if listener in rfds: # Incoming fork request with listener.accept()[0] as s: # Receive fds from client fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) if len(fds) > MAXFDS_TO_SEND: raise RuntimeError( "Too many ({0:n}) fds to send".format( len(fds))) child_r, child_w, *fds = fds s.close() pid = os.fork() if pid == 0: # Child code = 1 try: listener.close() selector.close() unused_fds = [alive_r, child_w, sig_r, sig_w] unused_fds.extend(pid_to_fd.values()) code = _serve_one(child_r, fds, unused_fds, old_handlers) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) else: # Send pid to client process try: write_signed(child_w, pid) except BrokenPipeError: # client vanished pass pid_to_fd[pid] = child_w os.close(child_r) for fd in fds: os.close(fd) except OSError as e: if e.errno != errno.ECONNABORTED: raise def _serve_one(child_r, fds, unused_fds, handlers): # close unnecessary stuff and reset signal handlers signal.set_wakeup_fd(-1) for sig, val in handlers.items(): signal.signal(sig, val) for fd in unused_fds: os.close(fd) (_forkserver._forkserver_alive_fd, resource_tracker._resource_tracker._fd, *_forkserver._inherited_fds) = fds # Run process object received over pipe parent_sentinel = os.dup(child_r) code = spawn._main(child_r, parent_sentinel) return code # # Read and write signed numbers # def read_signed(fd): data = b'' length = SIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return SIGNED_STRUCT.unpack(data)[0] def write_signed(fd, n): msg = SIGNED_STRUCT.pack(n) while msg: nbytes = os.write(fd, msg) if nbytes == 0: raise RuntimeError('should not get here') msg = msg[nbytes:] # # # _forkserver = ForkServer() ensure_running = _forkserver.ensure_running get_inherited_fds = _forkserver.get_inherited_fds connect_to_new_process = _forkserver.connect_to_new_process set_forkserver_preload = _forkserver.set_forkserver_preload uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/heap.py000066400000000000000000000265521455552142400246050ustar00rootroot00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import bisect from collections import defaultdict import mmap import os import sys import tempfile import threading from .context import reduction, assert_spawning from . import util __all__ = ['BufferWrapper'] # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': import _winapi class Arena(object): """ A shared memory area backed by anonymous memory (Windows). 
""" _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if _winapi.GetLastError() == 0: break # We have reopened a preexisting mmap. buf.close() else: raise FileExistsError('Cannot find name for new mmap') self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state # Reopen existing mmap self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. See issue 23060 #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS else: class Arena(object): """ A shared memory area backed by a temporary file (POSIX). """ if sys.platform == 'linux': _dir_candidates = ['/dev/shm'] else: _dir_candidates = [] def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: # Arena is created anew (if fd != -1, it means we're coming # from rebuild_arena() below) self.fd, name = tempfile.mkstemp( prefix='pym-%d-'%os.getpid(), dir=self._choose_dir(size)) os.unlink(name) util.Finalize(self, os.close, (self.fd,)) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def _choose_dir(self, size): # Choose a non-storage backed directory if possible, # to improve performance for d in self._dir_candidates: st = os.statvfs(d) if st.f_bavail * st.f_frsize >= size: # enough free space? return d return util.get_temp_dir() def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because ' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): # Minimum malloc() alignment _alignment = 8 _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() # Current arena allocation size self._size = size # A sorted list of available block sizes in arenas self._lengths = [] # Free block management: # - map each block size to a list of `(Arena, start, stop)` blocks self._len_to_seq = {} # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block # starting at that offset self._start_to_block = {} # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block # ending at that offset self._stop_to_block = {} # Map arenas to their `(Arena, start, stop)` blocks in use self._allocated_blocks = defaultdict(set) self._arenas = [] # List of pending blocks to free - see comment in free() below self._pending_free_blocks = [] # Statistics self._n_mallocs = 0 self._n_frees = 0 @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _new_arena(self, size): # Create a new arena with at least the given *size* length = self._roundup(max(self._size, size), mmap.PAGESIZE) # We carve larger and larger arenas, for efficiency, until we # reach a large-ish size (roughly L3 cache-sized) if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) def 
_discard_arena(self, arena): # Possibly delete the given (unused) arena length = arena.size # Reusing an existing arena is faster than creating a new one, so # we only reclaim space if it's large enough. if length < self._DISCARD_FREE_SPACE_LARGER_THAN: return blocks = self._allocated_blocks.pop(arena) assert not blocks del self._start_to_block[(arena, 0)] del self._stop_to_block[(arena, length)] self._arenas.remove(arena) seq = self._len_to_seq[length] seq.remove((arena, 0, length)) if not seq: del self._len_to_seq[length] self._lengths.remove(length) def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): return self._new_arena(size) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _add_free_block(self, block): # make block available and try to merge with its neighbours in the arena (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _remove_allocated_block(self, block): arena, start, stop = block blocks = self._allocated_blocks[arena] blocks.remove((start, stop)) if not blocks: # Arena is entirely free, discard it from this process self._discard_arena(arena) def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held. while True: try: block = self._pending_free_blocks.pop() except IndexError: break self._add_free_block(block) self._remove_allocated_block(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
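# --- Editorial usage sketch (not part of the original module) --------------
# This heap is consumed through BufferWrapper (defined below), which is what
# multiprocess.sharedctypes builds RawValue/RawArray on top of.  A tiny
# single-process sketch; the function name is illustrative only.
def _example_buffer_wrapper():
    from multiprocess.heap import BufferWrapper

    wrapper = BufferWrapper(64)        # carves 64 bytes out of a shared Arena
    view = wrapper.create_memoryview()
    view[:5] = b'hello'                # write through the mmap-backed view
    assert bytes(view[:5]) == b'hello'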
if os.getpid() != self._lastpid: raise ValueError( "My pid ({0:n}) is not last pid {1:n}".format( os.getpid(),self._lastpid)) if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._n_frees += 1 self._free_pending_blocks() self._add_free_block(block) self._remove_allocated_block(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._n_mallocs += 1 # allow pending blocks to be marked available self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) real_stop = start + size if real_stop < stop: # if the returned block is larger than necessary, mark # the remainder available self._add_free_block((arena, real_stop, stop)) self._allocated_blocks[arena].add((start, real_stop)) return (arena, start, real_stop) # # Class wrapping a block allocated out of a Heap -- can be inherited by child process # class BufferWrapper(object): _heap = Heap() def __init__(self, size): if size < 0: raise ValueError("Size {0:n} out of range".format(size)) if sys.maxsize <= size: raise OverflowError("Size {0:n} too large".format(size)) block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start+size] uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/managers.py000066400000000000000000001344031455552142400254600ustar00rootroot00000000000000# # Module providing manager classes for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] # # Imports # import sys import threading import signal import array import queue import time import types import os from os import getpid from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen, ProcessError from . import pool from . import process from . import util from . import get_context try: from . 
import shared_memory except ImportError: HAS_SHMEM = False else: HAS_SHMEM = True __all__.append('SharedMemoryManager') # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] if view_types[0] is not list: # only needed in Py3.0 def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) # # Type for identifying shared objects # class Token(object): ''' Type to uniquely identify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): if not isinstance(result, str): raise TypeError( "Result {0!r} (kind '{1}') type is {2}, not str".format( result, kind, type(result))) if kind == '#UNSERIALIZABLE': return RemoteError('Unserializable message: %s\n' % result) else: return RemoteError(result) else: return ValueError('Unrecognized message type {!r}'.format(kind)) class RemoteError(Exception): def __str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): if not isinstance(authkey, bytes): raise TypeError( "Authkey {0!r} is type {1!s}, not bytes".format( authkey, type(authkey))) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if 
sys.stdout != sys.__stdout__: # what about stderr? util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def handle_request(self, c): ''' Handle a new connection ''' funcname = result = request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) c.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = \ self.id_to_local_proxy_obj[ident] except KeyError: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' # Perhaps include debug info about 'c'? 
with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' return len(self.id_to_refcount) def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(self, c, typeid, /, *args, **kwds): ''' Create a new shared object and return its id ''' with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: if kwds or (len(args) != 1): raise ValueError( "Without callable, must have one non-keyword argument") obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: if not isinstance(method_to_typeid, dict): raise TypeError( "Method_to_typeid {0!r}: type {1!s}, not dict".format( method_to_typeid, type(method_to_typeid))) exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: # If no external references exist but an internal (to the # manager) still does and a new external reference is created # from it, restore the manager's tracking of it from the # previously stashed internal ref. if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = \ self.id_to_local_proxy_obj[ident] obj, exposed, gettypeid = self.id_to_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if ident not in self.id_to_refcount and \ ident in self.id_to_local_proxy_obj: util.debug('Server DECREF skipping %r', ident) return with self.mutex: if self.id_to_refcount[ident] <= 0: raise AssertionError( "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( ident, self.id_to_obj[ident], self.id_to_refcount[ident])) self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: # Two-step process in case the object turns out to contain other # proxy objects (e.g. a managed list of managed lists). # Otherwise, deleting self.id_to_obj[ident] would trigger the # deleting of the stored value (another managed object) which would # in turn attempt to acquire the mutex that is already held here. 
self.id_to_obj[ident] = (None, (), None) # thread-safe util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { #XXX: register dill? 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' # bpo-36368: protect server process from KeyboardInterrupt signals signal.signal(signal.SIGINT, signal.SIG_IGN) if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', 
server.address) server.serve_forever() def _create(self, typeid, /, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() if self._state.value != State.STARTED: if self._state.value == State.INITIAL: raise ProcessError("Unable to start server") elif self._state.value == State.SHUTDOWN: raise ProcessError("Manager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass @property def address(self): return self._address @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): # isinstance? 
assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, /, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] # Should be set to True only when a proxy object is being created # on the manager server; primary use case: nested proxy objects. # RebuildProxy detects when a proxy is being created on the manager # and sets this value appropriately. 
self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... 
decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = \ server.id_to_obj[token.id] incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return a proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, /, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref, manager_owned=manager_owned) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, /, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = 
typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): _exposed_ = ('__next__', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self, n=1): return self._callmethod('notify', (n,)) def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) __class_getitem__ = 
classmethod(types.GenericAlias) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) DictProxy._method_to_typeid_ = { '__iter__': 'Iterator', } ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__' )) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocess.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) # # Definition of SharedMemoryManager and SharedMemoryServer # if HAS_SHMEM: class _SharedMemoryTracker: "Manages one or more shared memory segments." def __init__(self, name, segment_names=[]): self.shared_memory_context_name = name self.segment_names = segment_names def register_segment(self, segment_name): "Adds the supplied shared memory block name to tracker." 
util.debug(f"Register segment {segment_name!r} in pid {getpid()}") self.segment_names.append(segment_name) def destroy_segment(self, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the list of blocks being tracked.""" util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}") self.segment_names.remove(segment_name) segment = shared_memory.SharedMemory(segment_name) segment.close() segment.unlink() def unlink(self): "Calls destroy_segment() on all tracked shared memory blocks." for segment_name in self.segment_names[:]: self.destroy_segment(segment_name) def __del__(self): util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}") self.unlink() def __getstate__(self): return (self.shared_memory_context_name, self.segment_names) def __setstate__(self, state): self.__init__(*state) class SharedMemoryServer(Server): public = Server.public + \ ['track_segment', 'release_segment', 'list_segments'] def __init__(self, *args, **kwargs): Server.__init__(self, *args, **kwargs) address = self.address # The address of Linux abstract namespaces can be bytes if isinstance(address, bytes): address = os.fsdecode(address) self.shared_memory_context = \ _SharedMemoryTracker(f"shm_{address}_{getpid()}") util.debug(f"SharedMemoryServer started by pid {getpid()}") def create(self, c, typeid, /, *args, **kwargs): """Create a new distributed-shared object (not backed by a shared memory block) and return its id to be used in a Proxy Object.""" # Unless set up as a shared proxy, don't make shared_memory_context # a standard part of kwargs. This makes things easier for supplying # simple functions. if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"): kwargs['shared_memory_context'] = self.shared_memory_context return Server.create(self, c, typeid, *args, **kwargs) def shutdown(self, c): "Call unlink() on all tracked shared memory, terminate the Server." self.shared_memory_context.unlink() return Server.shutdown(self, c) def track_segment(self, c, segment_name): "Adds the supplied shared memory block name to Server's tracker." self.shared_memory_context.register_segment(segment_name) def release_segment(self, c, segment_name): """Calls unlink() on the shared memory block with the supplied name and removes it from the tracker instance inside the Server.""" self.shared_memory_context.destroy_segment(segment_name) def list_segments(self, c): """Returns a list of names of shared memory blocks that the Server is currently tracking.""" return self.shared_memory_context.segment_names class SharedMemoryManager(BaseManager): """Like SyncManager but uses SharedMemoryServer instead of Server. It provides methods for creating and returning SharedMemory instances and for creating a list-like object (ShareableList) backed by shared memory. It also provides methods that create and return Proxy Objects that support synchronization across processes (i.e. multi-process-safe locks and semaphores). """ _Server = SharedMemoryServer def __init__(self, *args, **kwargs): if os.name == "posix": # bpo-36867: Ensure the resource_tracker is running before # launching the manager process, so that concurrent # shared_memory manipulation both in the manager and in the # current process does not create two resource_tracker # processes. from . 
import resource_tracker resource_tracker.ensure_running() BaseManager.__init__(self, *args, **kwargs) util.debug(f"{self.__class__.__name__} created by pid {getpid()}") def __del__(self): util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") pass def get_server(self): 'Better than monkeypatching for now; merge into Server ultimately' if self._state.value != State.INITIAL: if self._state.value == State.STARTED: raise ProcessError("Already started SharedMemoryServer") elif self._state.value == State.SHUTDOWN: raise ProcessError("SharedMemoryManager has shut down") else: raise ProcessError( "Unknown state {!r}".format(self._state.value)) return self._Server(self._registry, self._address, self._authkey, self._serializer) def SharedMemory(self, size): """Returns a new SharedMemory instance with the specified size in bytes, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sms = shared_memory.SharedMemory(None, create=True, size=size) try: dispatch(conn, None, 'track_segment', (sms.name,)) except BaseException as e: sms.unlink() raise e return sms def ShareableList(self, sequence): """Returns a new ShareableList instance populated with the values from the input sequence, to be tracked by the manager.""" with self._Client(self._address, authkey=self._authkey) as conn: sl = shared_memory.ShareableList(sequence) try: dispatch(conn, None, 'track_segment', (sl.shm.name,)) except BaseException as e: sl.shm.unlink() raise e return sl uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/pool.py000066400000000000000000000774531455552142400246470ustar00rootroot00000000000000# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import collections import itertools import os import queue import threading import time import traceback import types import warnings # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from . import util from . import get_context, TimeoutError from .connection import wait # # Constants representing the state of a pool # INIT = "INIT" RUN = "RUN" CLOSE = "CLOSE" TERMINATE = "TERMINATE" # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): if (maxtasks is not None) and not (isinstance(maxtasks, int) and maxtasks >= 1): raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception and func is not _helper_reraises_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) task = job = result = func = args = kwds = None completed += 1 util.debug('worker exiting after %d tasks' % completed) def _helper_reraises_exception(ex): 'Pickle-able helper function for use by _guarded_task_generation.' raise ex # # Class representing a process pool # class _PoolCache(dict): """ Class that implements a cache for the Pool class that will notify the pool management threads every time the cache is emptied. The notification is done by the use of a queue that is provided when instantiating the cache. """ def __init__(self, /, *args, notifier=None, **kwds): self.notifier = notifier super().__init__(*args, **kwds) def __delitem__(self, item): super().__delitem__(item) # Notify that the cache is empty. This is important because the # pool keeps maintaining workers until the cache gets drained. This # eliminates a race condition in which a task is finished after the # the pool's _handle_workers method has enter another iteration of the # loop. In this situation, the only event that can wake up the pool # is the cache to be emptied (no more tasks available). if not self: self.notifier.put(None) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' _wrap_exception = True @staticmethod def Process(ctx, *args, **kwds): return ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): # Attributes initialized early to make sure that they exist in # __del__() if __init__() raises an exception self._pool = [] self._state = INIT self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.SimpleQueue() # The _change_notifier queue exist to wake up self._handle_workers() # when the cache (self._cache) is empty or when there is a change in # the _state variable of the thread that runs _handle_workers. 
self._change_notifier = self._ctx.SimpleQueue() self._cache = _PoolCache(notifier=self._change_notifier) self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes try: self._repopulate_pool() except Exception: for p in self._pool: if p.exitcode is None: p.terminate() for p in self._pool: p.join() raise sentinels = self._get_sentinels() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self._cache, self._taskqueue, self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception, sentinels, self._change_notifier) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._change_notifier, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) self._state = RUN # Copy globals as function locals to make sure that they are available # during Python shutdown when the Pool is destroyed. def __del__(self, _warn=warnings.warn, RUN=RUN): if self._state == RUN: _warn(f"unclosed running multiprocessing pool {self!r}", ResourceWarning, source=self) if getattr(self, '_change_notifier', None) is not None: self._change_notifier.put(None) def __repr__(self): cls = self.__class__ return (f'<{cls.__module__}.{cls.__qualname__} ' f'state={self._state} ' f'pool_size={len(self._pool)}>') def _get_sentinels(self): task_queue_sentinels = [self._outqueue._reader] self_notifier_sentinels = [self._change_notifier._reader] return [*task_queue_sentinels, *self_notifier_sentinels] @staticmethod def _get_worker_sentinels(workers): return [worker.sentinel for worker in workers if hasattr(worker, "sentinel")] @staticmethod def _join_exited_workers(pool): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. """ cleaned = False for i in reversed(range(len(pool))): worker = pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del pool[i] return cleaned def _repopulate_pool(self): return self._repopulate_pool_static(self._ctx, self.Process, self._processes, self._pool, self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) @staticmethod def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. 
""" for i in range(processes - len(pool)): w = Process(ctx, target=worker, args=(inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() pool.append(w) util.debug('added worker') @staticmethod def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception): """Clean up any exited workers and start replacements for them. """ if Pool._join_exited_workers(pool): Pool._repopulate_pool_static(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _check_running(self): if self._state != RUN: raise ValueError("Pool not running") def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. Pool must be running. ''' return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def _guarded_task_generation(self, result_job, func, iterable): '''Provides a generator of tasks for imap and imap_unordered with appropriate handling for iterables which throw exceptions during iteration.''' try: i = -1 for i, x in enumerate(iterable): yield (result_job, i, func, (x,), {}) except Exception as e: yield (result_job, i+1, _helper_reraises_exception, (e,), {}) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. ''' self._check_running() if chunksize == 1: result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0:n}".format( chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. 
''' self._check_running() if chunksize == 1: result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, func, iterable), result._set_length )) return result else: if chunksize < 1: raise ValueError( "Chunksize must be 1+, not {0!r}".format(chunksize)) task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapstar, task_batches), result._set_length )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' self._check_running() result = ApplyResult(self, callback, error_callback) self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' self._check_running() if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put( ( self._guarded_task_generation(result._job, mapper, task_batches), None ) ) return result @staticmethod def _wait_for_updates(sentinels, change_notifier, timeout=None): wait(sentinels, timeout=timeout) while not change_notifier.empty(): change_notifier.get() @classmethod def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception, sentinels, change_notifier): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
while thread._state == RUN or (cache and thread._state != TERMINATE): cls._maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, initializer, initargs, maxtasksperchild, wrap_exception) current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] cls._wait_for_updates(current_sentinels, change_notifier) # send sentinel to stop workers taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): task = None try: # iterating taskseq cannot fail for task in taskseq: if thread._state != RUN: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, idx = task[:2] try: cache[job]._set(idx, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') idx = task[1] if task else -1 set_length(idx + 1) continue break finally: task = taskseq = job = None else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state != RUN: assert thread._state == TERMINATE, "Thread not in TERMINATE" util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass task = job = obj = None if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE self._change_notifier.put(None) def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') if self._state == RUN: raise ValueError("Pool is still running") elif self._state not in (CLOSE, TERMINATE): raise ValueError("In unknown state") self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') # Notify that the worker_handler state has been changed so the # _handle_workers loop can be unblocked (and exited) in order to # send the finalization sentinel all the workers. worker_handler._state = TERMINATE change_notifier.put(None) task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) if (not result_handler.is_alive()) and (len(cache) != 0): raise AssertionError( "Cannot have cache with result_hander not alive") result_handler._state = TERMINATE change_notifier.put(None) outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): self._check_running() return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, pool, callback, error_callback): self._pool = pool self._event = threading.Event() self._job = next(job_counter) self._cache = pool._cache self._callback = callback self._error_callback = error_callback self._cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): if not self.ready(): raise ValueError("{0!r} not ready".format(self)) return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] self._pool = None __class_getitem__ = classmethod(types.GenericAlias) AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, pool, chunksize, length, callback, error_callback): ApplyResult.__init__(self, pool, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del self._cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): self._number_left -= 1 success, result = success_result if success and self._success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() self._pool = None else: if not success and self._success: # only store first exception self._success = False self._value = result if self._number_left == 0: # only consider the result ready once all jobs are done if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() self._pool = None # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, pool): self._pool = pool self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = pool._cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} self._cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise StopIteration from None self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._pool = None raise 
StopIteration from None raise TimeoutError from None success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] self._pool = None def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] self._pool = None # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] self._pool = None # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(ctx, *args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.SimpleQueue() self._outqueue = queue.SimpleQueue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _get_sentinels(self): return [self._change_notifier._reader] @staticmethod def _get_worker_sentinels(workers): return [] @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # drain inqueue, and put sentinels at its head to make workers finish try: while True: inqueue.get(block=False) except queue.Empty: pass for i in range(size): inqueue.put(None) def _wait_for_updates(self, sentinels, change_notifier, timeout): time.sleep(timeout) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/popen_fork.py000066400000000000000000000045061455552142400260250ustar00rootroot00000000000000import os import signal from . import util __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' def __init__(self, process_obj): util._flush_std_streams() self.returncode = None self.finalizer = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except OSError: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: self.returncode = os.waitstatus_to_exitcode(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from multiprocess.connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def _send_signal(self, sig): if self.returncode is None: try: os.kill(self.pid, sig) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def terminate(self): self._send_signal(signal.SIGTERM) def kill(self): self._send_signal(signal.SIGKILL) def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) os.close(parent_w) code = process_obj._bootstrap(parent_sentinel=child_r) finally: os._exit(code) else: os.close(child_w) os.close(child_r) self.finalizer = util.Finalize(self, util.close_fds, (parent_r, parent_w,)) self.sentinel = parent_r def close(self): if self.finalizer is not None: self.finalizer() uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/popen_forkserver.py000066400000000000000000000042631455552142400272540ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen if not reduction.HAVE_SEND_HANDLE: raise ImportError('No support for sending fds between processes') from . import forkserver from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) # Keep a duplicate of the data pipe's write end as a sentinel of the # parent process used by the child process. _parent_w = os.dup(w) self.finalizer = util.Finalize(self, util.close_fds, (_parent_w, self.sentinel)) with open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_signed(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from multiprocess.connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_signed(self.sentinel) except (OSError, EOFError): # This should not happen usually, but perhaps the forkserver # process itself got killed self.returncode = 255 return self.returncode uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/popen_spawn_posix.py000066400000000000000000000037551455552142400274430ustar00rootroot00000000000000import io import os from .context import reduction, set_spawning_popen from . import popen_fork from . import spawn from . import util __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super().__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): from . 
import resource_tracker tracker_fd = resource_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds) self.sentinel = parent_r with open(parent_w, 'wb', closefd=False) as f: f.write(fp.getbuffer()) finally: fds_to_close = [] for fd in (parent_r, parent_w): if fd is not None: fds_to_close.append(fd) self.finalizer = util.Finalize(self, util.close_fds, fds_to_close) for fd in (child_r, child_w): if fd is not None: os.close(fd) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/popen_spawn_win32.py000066400000000000000000000076531455552142400272440ustar00rootroot00000000000000import os import msvcrt import signal import sys import _winapi from .context import reduction, get_spawning_popen, set_spawning_popen from . import spawn from . import util __all__ = ['Popen'] # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") def _path_eq(p1, p2): return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) WINENV = not _path_eq(sys.executable, sys._base_executable) def _close_handles(*handles): for handle in handles: _winapi.CloseHandle(handle) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' def __init__(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be duplicated by the child process # -- see spawn_main() in spawn.py. # # bpo-33929: Previously, the read end of pipe was "stolen" by the child # process, but it leaked a handle if the child process had been # terminated before it could steal the handle from the parent process. rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) cmd = ' '.join('"%s"' % x for x in cmd) python_exe = spawn.get_executable() # bpo-35797: When running in a venv, we bypass the redirect # executor and launch our base Python. 
if WINENV and _path_eq(python_exe, sys.executable): python_exe = sys._base_executable env = os.environ.copy() env["__PYVENV_LAUNCHER__"] = sys.executable else: env = None with open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = _winapi.CreateProcess( python_exe, cmd, None, None, False, 0, env, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) self.finalizer = util.Finalize(self, _close_handles, (self.sentinel, int(rhandle))) # send information to child set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: set_spawning_popen(None) def duplicate_for_child(self, handle): assert self is get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except OSError: if self.wait(timeout=1.0) is None: raise kill = terminate def close(self): self.finalizer() uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/process.py000066400000000000000000000273321455552142400253430ustar00rootroot00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
# __all__ = ['BaseProcess', 'current_process', 'active_children', 'parent_process'] # # Imports # import os import sys import signal import itertools import threading from _weakrefset import WeakSet # # # try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def active_children(): ''' Return list of process objects corresponding to live child processes ''' _cleanup() return list(_children) def parent_process(): ''' Return process object representing the parent process ''' return _parent_process # # # def _cleanup(): # check for processes which have finished for p in list(_children): if p._popen.poll() is not None: _children.discard(p) # # The `Process` class # class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analogous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count,) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._parent_name = _current_process.name self._popen = None self._closed = False self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) if daemon is not None: self.daemon = daemon _dangling.add(self) def _check_closed(self): if self._closed: raise ValueError("process object is closed") def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' self._check_closed() assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel # Avoid a refcycle if the target function holds an indirect # reference to the process object (see bpo-30775) del self._target, self._args, self._kwargs _children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._check_closed() self._popen.terminate() def kill(self): ''' Terminate process; sends SIGKILL signal or uses TerminateProcess() ''' self._check_closed() self._popen.kill() def join(self, timeout=None): ''' Wait until child process terminates ''' self._check_closed() assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) def is_alive(self): ''' Return whether process is alive ''' self._check_closed() if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False returncode = self._popen.poll() if returncode is None: return True else: _children.discard(self) return False def close(self): ''' Close the Process object. This method releases resources held by the Process object. 
It is an error to call this method if the child process is still running. ''' if self._popen is not None: if self._popen.poll() is None: raise ValueError("Cannot close a process while it is still running. " "You should first call join() or terminate().") self._popen.close() self._popen = None del self._sentinel _children.discard(self) self._closed = True @property def name(self): return self._name @name.setter def name(self, name): assert isinstance(name, str), 'name must be a string' self._name = name @property def daemon(self): ''' Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' self._check_closed() if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' self._check_closed() if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None def __repr__(self): exitcode = None if self is _current_process: status = 'started' elif self._closed: status = 'closed' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: exitcode = self._popen.poll() if exitcode is not None: status = 'stopped' else: status = 'started' info = [type(self).__name__, 'name=%r' % self._name] if self._popen is not None: info.append('pid=%s' % self._popen.pid) info.append('parent=%s' % self._parent_pid) info.append(status) if exitcode is not None: exitcode = _exitcode_to_name.get(exitcode, exitcode) info.append('exitcode=%s' % exitcode) if self.daemon: info.append('daemon') return '<%s>' % ' '.join(info) ## def _bootstrap(self, parent_sentinel=None): from . 
import util, context global _current_process, _parent_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() util._close_stdin() old_process = _current_process _current_process = self _parent_process = _ParentProcess( self._parent_name, self._parent_pid, parent_sentinel) if threading._HAVE_THREAD_NATIVE_ID: threading.main_thread()._set_native_id() try: util._finalizer_registry.clear() util._run_after_forkers() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process calling self.run()') try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as e: if e.code is None: exitcode = 0 elif isinstance(e.code, int): exitcode = e.code else: sys.stderr.write(str(e.code) + '\n') exitcode = 1 except: exitcode = 1 import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: threading._shutdown() util.info('process exiting with exitcode %d' % exitcode) util._flush_std_streams() return exitcode # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) return AuthenticationString, (bytes(self),) # # Create object representing the parent process # class _ParentProcess(BaseProcess): def __init__(self, name, pid, sentinel): self._identity = () self._name = name self._pid = pid self._parent_pid = None self._popen = None self._closed = False self._sentinel = sentinel self._config = {} def is_alive(self): from multiprocess.connection import wait return not wait([self._sentinel], timeout=0) @property def ident(self): return self._pid def join(self, timeout=None): ''' Wait until parent process terminates ''' from multiprocess.connection import wait wait([self._sentinel], timeout=timeout) pid = ident # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._closed = False self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} # Note that some versions of FreeBSD only allow named # semaphores to have names of up to 14 characters. Therefore # we choose a short prefix. # # On MacOSX in a sandbox it may be necessary to use a # different prefix -- see #19478. # # Everything in self._config will be inherited by descendant # processes. def close(self): pass _parent_process = None _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in list(signal.__dict__.items()): if name[:3]=='SIG' and '_' not in name: _exitcode_to_name[-signum] = f'-{name}' # For debug and leak testing _dangling = WeakSet() uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/queues.py000066400000000000000000000275531455552142400252010ustar00rootroot00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
# __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import time import types import weakref import errno from queue import Empty, Full try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing from . import connection from . import context _ForkingPickler = context.reduction.ForkingPickler from .util import debug, info, Finalize, register_after_fork, is_exiting # # Queue type using a pipe, buffer and thread # class Queue(object): def __init__(self, maxsize=0, *, ctx): if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._reset() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._reset() def _after_fork(self): debug('Queue._after_fork()') self._reset(after_fork=True) def _reset(self, after_fork=False): if after_fork: self._notempty._at_fork_reinit() else: self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = getattr(time,'monotonic',time.time)() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - getattr(time,'monotonic',time.time)() if not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed, "Queue {0!r} not closed".format(self) if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except 
AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._reader.close, self._writer.close, self._ignore_epipe, self._on_queue_feeder_error, self._sem), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') if not self._joincancelled: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, reader_close, writer_close, ignore_epipe, onerror, queue_sem): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None while 1: try: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') reader_close() writer_close() return # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as e: if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. if is_exiting(): info('error in queue thread: %s', e) return else: # Since the object has not been sent in the queue, we need # to decrease the size of the queue. The error acts as # if the object had been silently removed from the queue # and this step is necessary to have a properly working # queue. queue_sem.release() onerror(e, obj) @staticmethod def _on_queue_feeder_error(e, obj): """ Private API hook called when feeding data in the background thread raises an exception. For overriding by concurrent.futures. """ import traceback traceback.print_exc() _sentinel = object() # # A queue type which also supports join() and task_done() methods # # Note that if you do not call task_done() for each finished task then # eventually the counter's semaphore may overflow causing Bad Things # to happen. 
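Queue above hands objects to a feeder thread that pickles them with _ForkingPickler and writes them to a pipe; JoinableQueue, defined next, adds task_done()/join() bookkeeping on top. A small producer/consumer sketch (the function and sentinel choices are illustrative):

import multiprocess as mp

def _consume(q):
    while True:
        item = q.get()            # blocks until something arrives
        if item is None:          # None is used here as an explicit stop sentinel
            break
        print('consumed', item)

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=_consume, args=(q,))
    p.start()
    for i in range(3):
        q.put(i)
    q.put(None)                   # ask the consumer to exit
    p.join()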
# class JoinableQueue(Queue): def __init__(self, maxsize=0, *, ctx): Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): if self._closed: raise ValueError(f"Queue {self!r} is closed") if not self._sem.acquire(block, timeout): raise Full with self._notempty, self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() # # Simplified Queue type -- really just a locked pipe # class SimpleQueue(object): def __init__(self, *, ctx): self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() def close(self): self._reader.close() self._writer.close() def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._poll = self._reader.poll def get(self): with self._rlock: res = self._reader.recv_bytes() # unserialize the data after having released the lock return _ForkingPickler.loads(res) def put(self, obj): # serialize the data before acquiring the lock obj = _ForkingPickler.dumps(obj) if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(obj) else: with self._wlock: self._writer.send_bytes(obj) __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/reduction.py000066400000000000000000000226451455552142400256630ustar00rootroot00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from abc import ABCMeta import copyreg import functools import io import os try: import dill as pickle except ImportError: import pickle import socket import sys from . 
import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocess.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None, *args, **kwds): buf = io.BytesIO() cls(buf, protocol, *args, **kwds).dump(obj) return buf.getbuffer() loads = pickle.loads register = ForkingPickler.register def dump(obj, file, protocol=None, *args, **kwds): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol, *args, **kwds).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi def duplicate(handle, target_process=None, inheritable=False, *, source_process=None): '''Duplicate a handle. (target_process is a handle not a pid!)''' current_process = _winapi.GetCurrentProcess() if source_process is None: source_process = current_process if target_process is None: target_process = current_process return _winapi.DuplicateHandle( source_process, handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
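ForkingPickler is the serializer every pipe and queue in this package relies on; because it prefers dill when dill is importable, objects the stdlib pickler rejects (such as lambdas) can usually round-trip. A hedged sketch of dumps()/loads() and of register(), where the Point class and its reducer are illustrative and the lambda round-trip assumes dill is installed:

from multiprocess.reduction import ForkingPickler, register

# round-trips only when ForkingPickler is backed by dill
func = ForkingPickler.loads(ForkingPickler.dumps(lambda x: x + 1))
assert func(1) == 2

class Point:                      # illustrative user type
    def __init__(self, x, y):
        self.x, self.y = x, y

def _reduce_point(p):             # reducer consulted only by ForkingPickler instances
    return Point, (p.x, p.y)

register(Point, _reduce_point)
pt = ForkingPickler.loads(ForkingPickler.dumps(Point(1, 2)))
assert (pt.x, pt.y) == (1, 2)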
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError('received %d items of ancdata' % len(ancdata)) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) if len(a) % 256 != msg[0]: raise AssertionError( "Len is {0:n} but msg[0] is {1!r}".format( len(a), msg[0])) return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) class AbstractReducer(metaclass=ABCMeta): '''Abstract base class for use in implementing a Reduction class suitable for use in replacing the standard reduction mechanism used in multiprocess.''' ForkingPickler = ForkingPickler register = register dump = dump send_handle = send_handle recv_handle = recv_handle if sys.platform == 'win32': steal_handle = steal_handle duplicate = duplicate DupHandle = DupHandle else: sendfds = sendfds recvfds = recvfds DupFd = DupFd _reduce_method = _reduce_method _reduce_method_descriptor = _reduce_method_descriptor _rebuild_partial = _rebuild_partial _reduce_socket = _reduce_socket _rebuild_socket = _rebuild_socket def __init__(self, *args): register(type(_C().f), _reduce_method) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) register(functools.partial, _reduce_partial) register(socket.socket, _reduce_socket) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/resource_sharer.py000066400000000000000000000120141455552142400270470ustar00rootroot00000000000000# # We use a background thread for sharing fds on Unix, and for sharing sockets on # Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. # import os import signal import socket import sys import threading from . import process from .context import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. 
This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resources using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() self._lock._at_fork_reinit() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None, "Already have Listener" util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/resource_tracker.py000066400000000000000000000207701455552142400272260ustar00rootroot00000000000000############################################################################### # Server process to keep track of unlinked resources (like shared memory # segments, semaphores etc.) and clean them. # # On Unix we run a server process which keeps track of unlinked # resources. The server ignores SIGINT and SIGTERM and reads from a # pipe. 
Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining resource names. # # This is important because there may be system limits for such resources: for # instance, the system only supports a limited number of named semaphores, and # shared-memory segments live in the RAM. If a python process leaks such a # resource, this resource will not be removed till the next reboot. Without # this resource tracker process, "killall python" would probably leave unlinked # resources. import os import signal import sys import threading import warnings from . import spawn from . import util __all__ = ['ensure_running', 'register', 'unregister'] _HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') _IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) _CLEANUP_FUNCS = { 'noop': lambda: None, } if os.name == 'posix': try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import _posixshmem _CLEANUP_FUNCS.update({ 'semaphore': _multiprocessing.sem_unlink, 'shared_memory': _posixshmem.shm_unlink, }) class ResourceTracker(object): def __init__(self): self._lock = threading.Lock() self._fd = None self._pid = None def _stop(self): with self._lock: if self._fd is None: # not running return # closing the "alive" file descriptor stops main() os.close(self._fd) self._fd = None os.waitpid(self._pid, 0) self._pid = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that resource tracker process is running. This can be run from any process. Usually a child process will use the resource created by its parent.''' with self._lock: if self._fd is not None: # resource tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) # Clean-up to avoid dangling processes. try: # _pid can be None if this process is a child from another # python process, which has started the resource_tracker. if self._pid is not None: os.waitpid(self._pid, 0) except ChildProcessError: # The resource_tracker has already been terminated. pass self._fd = None self._pid = None warnings.warn('resource_tracker: process died unexpectedly, ' 'relaunching. Some resources might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from multiprocess.resource_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] # bpo-33613: Register a signal mask that will block the signals. # This signal mask will be inherited by the child that is going # to be spawned and will protect the child from a race condition # that can make the child die before it registers signal handlers # for SIGINT and SIGTERM. The mask is unregistered after spawning # the child. try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = util.spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r) def _check_alive(self): '''Check that the pipe has not been closed by sending a probe.''' try: # We cannot use send here as it calls ensure_running, creating # a cycle. 
os.write(self._fd, b'PROBE:0:noop\n') except OSError: return False else: return True def register(self, name, rtype): '''Register name of resource with resource tracker.''' self._send('REGISTER', name, rtype) def unregister(self, name, rtype): '''Unregister name of resource with resource tracker.''' self._send('UNREGISTER', name, rtype) def _send(self, cmd, name, rtype): self.ensure_running() msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') if len(name) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('name too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( nbytes, len(msg)) _resource_tracker = ResourceTracker() ensure_running = _resource_tracker.ensure_running register = _resource_tracker.register unregister = _resource_tracker.unregister getfd = _resource_tracker.getfd def main(fd): '''Run resource tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} try: # keep track of registered/unregistered resources with open(fd, 'rb') as f: for line in f: try: cmd, name, rtype = line.strip().decode('ascii').split(':') cleanup_func = _CLEANUP_FUNCS.get(rtype, None) if cleanup_func is None: raise ValueError( f'Cannot register {name} for automatic cleanup: ' f'unknown resource type {rtype}') if cmd == 'REGISTER': cache[rtype].add(name) elif cmd == 'UNREGISTER': cache[rtype].remove(name) elif cmd == 'PROBE': pass else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining resources for rtype, rtype_cache in cache.items(): if rtype_cache: try: warnings.warn('resource_tracker: There appear to be %d ' 'leaked %s objects to clean up at shutdown' % (len(rtype_cache), rtype)) except Exception: pass for name in rtype_cache: # For some reason the process which created and registered this # resource has failed to unregister it. Presumably it has # died. We therefore unlink it. try: try: _CLEANUP_FUNCS[rtype](name) except Exception as e: warnings.warn('resource_tracker: %r: %s' % (name, e)) finally: pass uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/shared_memory.py000066400000000000000000000437341455552142400265270ustar00rootroot00000000000000"""Provides shared memory for direct access across processes. The API of this package is currently provisional. Refer to the documentation for details. """ __all__ = [ 'SharedMemory', 'ShareableList' ] from functools import partial import mmap import os import errno import struct import secrets import types if os.name == "nt": import _winapi _USE_POSIX = False else: import _posixshmem _USE_POSIX = True _O_CREX = os.O_CREAT | os.O_EXCL # FreeBSD (and perhaps other BSDs) limit names to 14 characters. _SHM_SAFE_NAME_LENGTH = 14 # Shared memory block name prefix if _USE_POSIX: _SHM_NAME_PREFIX = '/psm_' else: _SHM_NAME_PREFIX = 'wnsm_' def _make_filename(): "Create a random filename for the shared memory object." 
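User code rarely talks to the resource tracker directly; register()/unregister() simply write single-line commands of the form REGISTER:<name>:<rtype> down the pipe that main() above parses. A POSIX-only sketch, using an illustrative resource name and the harmless 'noop' type:

from multiprocess import resource_tracker

resource_tracker.ensure_running()                     # lazily spawns the tracker process
resource_tracker.register('demo-resource', 'noop')    # tracked until unregistered
resource_tracker.unregister('demo-resource', 'noop')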
# number of random bytes to use for name nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 assert nbytes >= 2, '_SHM_NAME_PREFIX too long' name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) assert len(name) <= _SHM_SAFE_NAME_LENGTH return name class SharedMemory: """Creates a new shared memory block or attaches to an existing shared memory block. Every shared memory block is assigned a unique name. This enables one process to create a shared memory block with a particular name so that a different process can attach to that same shared memory block using that same name. As a resource for sharing data across processes, shared memory blocks may outlive the original process that created them. When one process no longer needs access to a shared memory block that might still be needed by other processes, the close() method should be called. When a shared memory block is no longer needed by any process, the unlink() method should be called to ensure proper cleanup.""" # Defaults; enables close() and unlink() to run without errors. _name = None _fd = -1 _mmap = None _buf = None _flags = os.O_RDWR _mode = 0o600 _prepend_leading_slash = True if _USE_POSIX else False def __init__(self, name=None, create=False, size=0): if not size >= 0: raise ValueError("'size' must be a positive integer") if create: self._flags = _O_CREX | os.O_RDWR if size == 0: raise ValueError("'size' must be a positive number different from zero") if name is None and not self._flags & os.O_EXCL: raise ValueError("'name' can only be None if create=True") if _USE_POSIX: # POSIX Shared Memory if name is None: while True: name = _make_filename() try: self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) except FileExistsError: continue self._name = name break else: name = "/" + name if self._prepend_leading_slash else name self._fd = _posixshmem.shm_open( name, self._flags, mode=self._mode ) self._name = name try: if create and size: os.ftruncate(self._fd, size) stats = os.fstat(self._fd) size = stats.st_size self._mmap = mmap.mmap(self._fd, size) except OSError: self.unlink() raise from .resource_tracker import register register(self._name, "shared_memory") else: # Windows Named Shared Memory if create: while True: temp_name = _make_filename() if name is None else name # Create and reserve shared memory block with this name # until it can be attached to by mmap. h_map = _winapi.CreateFileMapping( _winapi.INVALID_HANDLE_VALUE, _winapi.NULL, _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF, size & 0xFFFFFFFF, temp_name ) try: last_error_code = _winapi.GetLastError() if last_error_code == _winapi.ERROR_ALREADY_EXISTS: if name is not None: raise FileExistsError( errno.EEXIST, os.strerror(errno.EEXIST), name, _winapi.ERROR_ALREADY_EXISTS ) else: continue self._mmap = mmap.mmap(-1, size, tagname=temp_name) finally: _winapi.CloseHandle(h_map) self._name = temp_name break else: self._name = name # Dynamically determine the existing named shared memory # block's size which is likely a multiple of mmap.PAGESIZE. 
h_map = _winapi.OpenFileMapping( _winapi.FILE_MAP_READ, False, name ) try: p_buf = _winapi.MapViewOfFile( h_map, _winapi.FILE_MAP_READ, 0, 0, 0 ) finally: _winapi.CloseHandle(h_map) size = _winapi.VirtualQuerySize(p_buf) self._mmap = mmap.mmap(-1, size, tagname=name) self._size = size self._buf = memoryview(self._mmap) def __del__(self): try: self.close() except OSError: pass def __reduce__(self): return ( self.__class__, ( self.name, False, self.size, ), ) def __repr__(self): return f'{self.__class__.__name__}({self.name!r}, size={self.size})' @property def buf(self): "A memoryview of contents of the shared memory block." return self._buf @property def name(self): "Unique name that identifies the shared memory block." reported_name = self._name if _USE_POSIX and self._prepend_leading_slash: if self._name.startswith("/"): reported_name = self._name[1:] return reported_name @property def size(self): "Size in bytes." return self._size def close(self): """Closes access to the shared memory from this instance but does not destroy the shared memory block.""" if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() self._mmap = None if _USE_POSIX and self._fd >= 0: os.close(self._fd) self._fd = -1 def unlink(self): """Requests that the underlying shared memory block be destroyed. In order to ensure proper cleanup of resources, unlink should be called once (and only once) across all processes which have access to the shared memory block.""" if _USE_POSIX and self._name: from .resource_tracker import unregister _posixshmem.shm_unlink(self._name) unregister(self._name, "shared_memory") _encoding = "utf8" class ShareableList: """Pattern for a mutable list-like object shareable via a shared memory block. It differs from the built-in list type in that these lists can not change their overall length (i.e. no append, insert, etc.) 
Because values are packed into a memoryview as bytes, the struct packing format for any storable value must require no more than 8 characters to describe its format.""" # The shared memory area is organized as follows: # - 8 bytes: number of items (N) as a 64-bit integer # - (N + 1) * 8 bytes: offsets of each element from the start of the # data area # - K bytes: the data area storing item values (with encoding and size # depending on their respective types) # - N * 8 bytes: `struct` format string for each element # - N bytes: index into _back_transforms_mapping for each element # (for reconstructing the corresponding Python value) _types_mapping = { int: "q", float: "d", bool: "xxxxxxx?", str: "%ds", bytes: "%ds", None.__class__: "xxxxxx?x", } _alignment = 8 _back_transforms_mapping = { 0: lambda value: value, # int, float, bool 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str 2: lambda value: value.rstrip(b'\x00'), # bytes 3: lambda _value: None, # None } @staticmethod def _extract_recreation_code(value): """Used in concert with _back_transforms_mapping to convert values into the appropriate Python objects when retrieving them from the list as well as when storing them.""" if not isinstance(value, (str, bytes, None.__class__)): return 0 elif isinstance(value, str): return 1 elif isinstance(value, bytes): return 2 else: return 3 # NoneType def __init__(self, sequence=None, *, name=None): if name is None or sequence is not None: sequence = sequence or () _formats = [ self._types_mapping[type(item)] if not isinstance(item, (str, bytes)) else self._types_mapping[type(item)] % ( self._alignment * (len(item) // self._alignment + 1), ) for item in sequence ] self._list_len = len(_formats) assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len offset = 0 # The offsets of each list element into the shared memory's # data area (0 meaning the start of the data area, not the start # of the shared memory area). self._allocated_offsets = [0] for fmt in _formats: offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1]) self._allocated_offsets.append(offset) _recreation_codes = [ self._extract_recreation_code(item) for item in sequence ] requested_size = struct.calcsize( "q" + self._format_size_metainfo + "".join(_formats) + self._format_packing_metainfo + self._format_back_transform_codes ) self.shm = SharedMemory(name, create=True, size=requested_size) else: self.shm = SharedMemory(name) if sequence is not None: _enc = _encoding struct.pack_into( "q" + self._format_size_metainfo, self.shm.buf, 0, self._list_len, *(self._allocated_offsets) ) struct.pack_into( "".join(_formats), self.shm.buf, self._offset_data_start, *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) ) struct.pack_into( self._format_packing_metainfo, self.shm.buf, self._offset_packing_formats, *(v.encode(_enc) for v in _formats) ) struct.pack_into( self._format_back_transform_codes, self.shm.buf, self._offset_back_transform_codes, *(_recreation_codes) ) else: self._list_len = len(self) # Obtains size from offset 0 in buffer. self._allocated_offsets = list( struct.unpack_from( self._format_size_metainfo, self.shm.buf, 1 * 8 ) ) def _get_packing_format(self, position): "Gets the packing format for a single value stored in the list." 
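SharedMemory above exposes a named block that any process can attach to by name; __init__ registers new blocks with the resource tracker so leaked ones are reported at shutdown. A minimal sketch with illustrative sizes and payload:

from multiprocess import shared_memory

shm = shared_memory.SharedMemory(create=True, size=16)   # random '/psm_...' style name
shm.buf[:5] = b'hello'

other = shared_memory.SharedMemory(name=shm.name)        # attach purely by name
assert bytes(other.buf[:5]) == b'hello'

other.close()              # every attachment closes its own view
shm.close()
shm.unlink()               # exactly one process should unlink the block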
position = position if position >= 0 else position + self._list_len if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") v = struct.unpack_from( "8s", self.shm.buf, self._offset_packing_formats + position * 8 )[0] fmt = v.rstrip(b'\x00') fmt_as_str = fmt.decode(_encoding) return fmt_as_str def _get_back_transform(self, position): "Gets the back transformation function for a single value." if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") transform_code = struct.unpack_from( "b", self.shm.buf, self._offset_back_transform_codes + position )[0] transform_function = self._back_transforms_mapping[transform_code] return transform_function def _set_packing_format_and_transform(self, position, fmt_as_str, value): """Sets the packing format and back transformation code for a single value in the list at the specified position.""" if (position >= self._list_len) or (self._list_len < 0): raise IndexError("Requested position out of range.") struct.pack_into( "8s", self.shm.buf, self._offset_packing_formats + position * 8, fmt_as_str.encode(_encoding) ) transform_code = self._extract_recreation_code(value) struct.pack_into( "b", self.shm.buf, self._offset_back_transform_codes + position, transform_code ) def __getitem__(self, position): position = position if position >= 0 else position + self._list_len try: offset = self._offset_data_start + self._allocated_offsets[position] (v,) = struct.unpack_from( self._get_packing_format(position), self.shm.buf, offset ) except IndexError: raise IndexError("index out of range") back_transform = self._get_back_transform(position) v = back_transform(v) return v def __setitem__(self, position, value): position = position if position >= 0 else position + self._list_len try: item_offset = self._allocated_offsets[position] offset = self._offset_data_start + item_offset current_format = self._get_packing_format(position) except IndexError: raise IndexError("assignment index out of range") if not isinstance(value, (str, bytes)): new_format = self._types_mapping[type(value)] encoded_value = value else: allocated_length = self._allocated_offsets[position + 1] - item_offset encoded_value = (value.encode(_encoding) if isinstance(value, str) else value) if len(encoded_value) > allocated_length: raise ValueError("bytes/str item exceeds available storage") if current_format[-1] == "s": new_format = current_format else: new_format = self._types_mapping[str] % ( allocated_length, ) self._set_packing_format_and_transform( position, new_format, value ) struct.pack_into(new_format, self.shm.buf, offset, encoded_value) def __reduce__(self): return partial(self.__class__, name=self.shm.name), () def __len__(self): return struct.unpack_from("q", self.shm.buf, 0)[0] def __repr__(self): return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' @property def format(self): "The struct packing format used by all currently stored items." return "".join( self._get_packing_format(i) for i in range(self._list_len) ) @property def _format_size_metainfo(self): "The struct packing format used for the items' storage offsets." return "q" * (self._list_len + 1) @property def _format_packing_metainfo(self): "The struct packing format used for the items' packing formats." return "8s" * self._list_len @property def _format_back_transform_codes(self): "The struct packing format used for the items' back transforms." 
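ShareableList stores a fixed-length sequence of int/float/bool/str/bytes/None values inside a SharedMemory block; items can be replaced but the list never grows. A short sketch with illustrative values:

from multiprocess import shared_memory

sl = shared_memory.ShareableList([1, 2.0, 'three', b'four', None, True])
print(len(sl), sl[2], sl.index('three'))

attached = shared_memory.ShareableList(name=sl.shm.name) # attach by name elsewhere
attached[0] = -1                                         # in-place mutation is shared
assert sl[0] == -1

attached.shm.close()
sl.shm.close()
sl.shm.unlink()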
return "b" * self._list_len @property def _offset_data_start(self): # - 8 bytes for the list length # - (N + 1) * 8 bytes for the element offsets return (self._list_len + 2) * 8 @property def _offset_packing_formats(self): return self._offset_data_start + self._allocated_offsets[-1] @property def _offset_back_transform_codes(self): return self._offset_packing_formats + self._list_len * 8 def count(self, value): "L.count(value) -> integer -- return number of occurrences of value." return sum(value == entry for entry in self) def index(self, value): """L.index(value) -> integer -- return first index of value. Raises ValueError if the value is not present.""" for position, entry in enumerate(self): if value == entry: return position else: raise ValueError(f"{value!r} not in this container") __class_getitem__ = classmethod(types.GenericAlias) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/sharedctypes.py000066400000000000000000000142421455552142400263570ustar00rootroot00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import ctypes import weakref from . import heap from . import get_context from .context import reduction, assert_spawning _ForkingPickler = reduction.ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] # # # typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, 'f': ctypes.c_float, 'd': ctypes.c_double } # # # def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, lock=True, ctx=None): ''' Return a synchronization wrapper for a Value ''' obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): ''' Return a synchronization wrapper for a RawArray ''' obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("%r has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def 
synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = {name: make_property(name) for name in names} classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, ctx) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length _ForkingPickler.register(type_, reduce_ctype) buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec(template % ((name,)*7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/spawn.py000066400000000000000000000221151455552142400250070ustar00rootroot00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import sys import runpy import types from . import get_start_method, set_start_method from . import process from .context import reduction from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
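Value and Array above hand back ctypes objects wrapped in a process-shared lock (unless lock=False); get_lock() and .value/item access are the public surface. A small sketch with illustrative names and typecodes:

from multiprocess import Process, Value, Array

def _bump(counter, arr):
    with counter.get_lock():             # the RLock created by Value(..., lock=True)
        counter.value += 1
    for i in range(len(arr)):
        arr[i] = arr[i] * 2

if __name__ == '__main__':
    counter = Value('i', 0)              # shared C int
    arr = Array('d', [1.0, 2.0, 3.0])    # shared C double array
    p = Process(target=_bump, args=(counter, arr))
    p.start()
    p.join()
    print(counter.value, arr[:])         # 1 [2.0, 4.0, 6.0]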
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = getattr(sys, 'frozen', False) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe def get_executable(): return _python_exe # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--multiprocessing-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from multiprocess.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv), "Not forking" if sys.platform == 'win32': import msvcrt import _winapi if parent_pid is not None: source_process = _winapi.OpenProcess( _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid) else: source_process = None new_handle = reduction.duplicate(pipe_handle, source_process=source_process) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) parent_sentinel = source_process else: from . import resource_tracker resource_tracker._resource_tracker._fd = tracker_fd fd = pipe_handle parent_sentinel = os.dup(pipe_handle) exitcode = _main(fd, parent_sentinel) sys.exit(exitcode) def _main(fd, parent_sentinel): with os.fdopen(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = reduction.pickle.load(from_parent) prepare(preparation_data) self = reduction.pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap(parent_sentinel) def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... 
The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable.''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path=sys.path.copy() try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] main_mod_name = getattr(main_module.__spec__, "name", None) if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method'], force=True) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. 
We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/synchronize.py000066400000000000000000000270671455552142400262450ustar00rootroot00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] import threading import sys import tempfile try: import _multiprocess as _multiprocessing except ImportError: import _multiprocessing import time from . import context from . import process from . import util # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
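The preparation and bootstrap code above is why children started with 'spawn' (or 'forkserver') must be able to re-import the main module without side effects. A minimal sketch of the guarded idiom, choosing 'spawn' explicitly; the worker is illustrative:

import multiprocess as mp

def _worker(tag):
    print('hello from', tag)

if __name__ == '__main__':
    mp.freeze_support()                  # only has an effect in frozen executables
    mp.set_start_method('spawn')         # exercise the spawn path described above
    p = mp.Process(target=_worker, args=('spawned child',))
    p.start()
    p.join()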
# See issue 3770 try: from _multiprocess import SemLock, sem_unlink except (ImportError): try: from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770.") # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, *, ctx): if ctx is None: ctx = context._default_context.get_context() name = ctx.get_start_method() unlink_now = sys.platform == 'win32' or name == 'fork' for i in range(100): try: sl = self._semlock = _multiprocessing.SemLock( kind, value, maxvalue, self._make_name(), unlink_now) except FileExistsError: pass else: break else: raise FileExistsError('cannot find name for semaphore') util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if self._semlock.name is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .resource_tracker import register register(self._semlock.name, "semaphore") util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .resource_tracker import unregister sem_unlink(name) unregister(name, "semaphore") def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: h = sl.handle return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) # # Semaphore # class Semaphore(SemLock): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) # # Bounded semaphore # class BoundedSemaphore(Semaphore): def __init__(self, value=1, *, ctx): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % \ (self.__class__.__name__, value, self._semlock.maxvalue) # # Non-recursive lock # class Lock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 
'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) # # Recursive lock # class RLock(SemLock): def __init__(self, *, ctx): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) # # Condition variable # class Condition(object): def __init__(self, lock=None, *, ctx): self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def notify(self, n=1): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire( False), ('notify: Should not have been able to acquire ' + '_wait_semaphore') # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res, ('notify: Bug in sleeping_count.acquire' + '- res should not be False') sleepers = 0 while sleepers < n and self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def notify_all(self): self.notify(n=sys.maxsize) def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = getattr(time,'monotonic',time.time)() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - getattr(time,'monotonic',time.time)() if waittime <= 0: break self.wait(waittime) result = predicate() 
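        # [Illustrative note, not part of this module -- a hedged usage sketch.]
        # wait_for() simply re-evaluates the predicate each time the condition is
        # notified (or the remaining timeout runs out).  Typical cross-process use,
        # with hypothetical names, looks roughly like:
        #
        #     state = ctx.Value('i', 0)                     # shared counter
        #     with cond:
        #         cond.wait_for(lambda: state.value == 4)   # block until predicate holds
        #
        #     # ...and in the producer:
        #     with cond:
        #         state.value += 1
        #         cond.notify()                             # wake waiters so they re-test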
return result # # Event # class Event(object): def __init__(self, *, ctx): self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False # # Barrier # class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, *, ctx): import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): self._array[1] = value uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/tests/000077500000000000000000000000001455552142400244465ustar00rootroot00000000000000uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/tests/__init__.py000066400000000000000000005716141455552142400265750ustar00rootroot00000000000000# # Unit tests for the multiprocessing package # import unittest import unittest.mock import queue as pyqueue import time import io import itertools import sys import os import gc import errno import signal import array import socket import random import logging import subprocess import struct import operator import pickle #XXX: use dill? import weakref import warnings import test.support import test.support.script_helper from test import support from test.support import hashlib_helper from test.support import socket_helper # Skip tests if _multiprocessing wasn't built. _multiprocessing = test.support.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. test.support.import_module('multiprocess.synchronize') import threading import multiprocess as multiprocessing import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: from multiprocess import shared_memory HAS_SHMEM = True except ImportError: HAS_SHMEM = False try: import msvcrt except ImportError: msvcrt = None if hasattr(support,'check_sanitizer') and support.check_sanitizer(address=True): # bpo-45200: Skip multiprocessing tests if Python is built with ASAN to # work around a libasan race condition: dead lock in pthread_create(). 
raise unittest.SkipTest("libasan has a pthread_create() dead lock") # Don't ignore user's installed packages ENV = dict(__cleanenv = False, __isolated = False) # Timeout to wait until a process completes #XXX: travis-ci TIMEOUT = (90.0 if os.environ.get('COVERAGE') else 60.0) # seconds def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocessing.queues.Queue): queue.close() queue.join_thread() def join_process(process): # Since multiprocessing.Process has the same API than threading.Thread # (join() and is_alive(), the support function can be reused support.join_thread(process, timeout=TIMEOUT) if os.name == "posix": from multiprocess import resource_tracker def _resource_unlink(name, rtype): resource_tracker._CLEANUP_FUNCS[rtype](name) # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocess.connection import wait def wait_for_handle(handle, timeout): if timeout is not None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double, c_longlong except ImportError: Structure = object c_int = c_double = c_longlong = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." % nsems_min) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = getattr(time,'monotonic',time.time)() try: return self.func(*args, **kwds) finally: self.elapsed = getattr(time,'monotonic',time.time)() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. 
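    # [Illustrative note, not part of the test suite -- a hedged sketch.]  The
    # __reduce__ defined just below makes a test case that is accidentally captured
    # by a child process fail loudly at pickling time instead of hanging, e.g.
    # roughly (the method name is hypothetical):
    #
    #     p = multiprocessing.Process(target=self._some_bound_method)
    #     p.start()   # under the spawn start method -> NotImplementedError from __reduce__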
def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_parent_process_attributes(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) self.assertIsNone(self.parent_process()) rconn, wconn = self.Pipe(duplex=False) p = self.Process(target=self._test_send_parent_process, args=(wconn,)) p.start() p.join() parent_pid, parent_name = rconn.recv() self.assertEqual(parent_pid, self.current_process().pid) self.assertEqual(parent_pid, os.getpid()) self.assertEqual(parent_name, self.current_process().name) @classmethod def _test_send_parent_process(cls, wconn): from multiprocess.process import parent_process wconn.send([parent_process().pid, parent_process().name]) def _test_parent_process(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # Launch a child process. Make it launch a grandchild process. Kill the # child process and make sure that the grandchild notices the death of # its parent (a.k.a the child process). 
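        # [Illustrative note, not part of the test suite -- a hedged sketch.]  The
        # grandchild observes the death of its parent through
        # multiprocess.process.parent_process(), roughly:
        #
        #     from multiprocess.process import parent_process
        #     parent = parent_process()        # None only in the main process
        #     parent.join(timeout=5)           # returns once the parent has exited
        #     still_there = parent.is_alive()  # False after the parent was terminated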
rconn, wconn = self.Pipe(duplex=False) p = self.Process( target=self._test_create_grandchild_process, args=(wconn, )) p.start() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "alive") p.terminate() p.join() if not rconn.poll(timeout=support.LONG_TIMEOUT): raise AssertionError("Could not communicate with child process") parent_process_status = rconn.recv() self.assertEqual(parent_process_status, "not alive") @classmethod def _test_create_grandchild_process(cls, wconn): p = cls.Process(target=cls._test_report_parent_status, args=(wconn, )) p.start() time.sleep(300) @classmethod def _test_report_parent_status(cls, wconn): from multiprocess.process import parent_process wconn.send("alive" if parent_process().is_alive() else "not alive") parent_process().join(timeout=support.SHORT_TIMEOUT) wconn.send("alive" if parent_process().is_alive() else "not alive") def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id") def test_process_mainthread_native_id(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current_mainthread_native_id = threading.main_thread().native_id q = self.Queue(1) p = self.Process(target=self._test_process_mainthread_native_id, args=(q,)) p.start() child_mainthread_native_id = q.get() p.join() close_queue(q) self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id) @classmethod def _test_process_mainthread_native_id(cls, q): mainthread_native_id = threading.main_thread().native_id q.put(mainthread_native_id) @classmethod def _sleep_some(cls): time.sleep(100) @classmethod def _test_sleep(cls, delay): time.sleep(delay) def _kill_process(self, meth): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._sleep_some) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) meth(p) if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. 
# We use alarm() to interrupt it if it blocks for too long. def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() return p.exitcode def test_terminate(self): exitcode = self._kill_process(multiprocessing.Process.terminate) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGTERM) def test_kill(self): exitcode = self._kill_process(multiprocessing.Process.kill) if os.name != 'nt': self.assertEqual(exitcode, -signal.SIGKILL) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) @classmethod def _test_close(cls, rc=0, q=None): if q is not None: q.get() sys.exit(rc) def test_close(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) q = self.Queue() p = self.Process(target=self._test_close, kwargs={'q': q}) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) # Child is still alive, cannot close with self.assertRaises(ValueError): p.close() q.put(None) p.join() self.assertEqual(p.is_alive(), False) self.assertEqual(p.exitcode, 0) p.close() with self.assertRaises(ValueError): p.is_alive() with self.assertRaises(ValueError): p.join() with self.assertRaises(ValueError): p.terminate() p.close() wr = weakref.ref(p) del p gc.collect() self.assertIs(wr(), None) close_queue(q) @unittest.skipIf(True, 'bad pipe in pypy3') def test_many_processes(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() travis = os.environ.get('COVERAGE') #XXX: travis-ci N = (1 if travis else 5) if sm == 'spawn' else 100 # Try to overwhelm the forkserver loop with events procs = [self.Process(target=self._test_sleep, args=(0.01,)) for i in range(N)] for p in procs: p.start() for p in procs: 
join_process(p) for p in procs: self.assertEqual(p.exitcode, 0) procs = [self.Process(target=self._sleep_some) for i in range(N)] for p in procs: p.start() time.sleep(0.001) # let the children start... for p in procs: p.terminate() for p in procs: join_process(p) if os.name != 'nt': exitcodes = [-signal.SIGTERM] if sys.platform == 'darwin': # bpo-31510: On macOS, killing a freshly started process with # SIGTERM sometimes kills the process with SIGKILL. exitcodes.append(-signal.SIGKILL) for p in procs: self.assertIn(p.exitcode, exitcodes) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() for i in range(3): gc.collect() self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_child_fd_inflation(self, evt, q): q.put(test.support.fd_count()) evt.wait() def test_child_fd_inflation(self): # Number of fds in child processes should not grow with the # number of running children. if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm == 'fork': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) N = 5 evt = self.Event() q = self.Queue() procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q)) for i in range(N)] for p in procs: p.start() try: fd_counts = [q.get() for i in range(N)] self.assertEqual(len(set(fd_counts)), 1, fd_counts) finally: evt.set() for p in procs: p.join() close_queue(q) @classmethod def _test_wait_for_threads(self, evt): def func1(): time.sleep(0.5) evt.set() def func2(): time.sleep(20) evt.clear() threading.Thread(target=func1).start() threading.Thread(target=func2, daemon=True).start() def test_wait_for_threads(self): # A child process should wait for non-daemonic threads to end # before exiting if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) evt = self.Event() proc = self.Process(target=self._test_wait_for_threads, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) @classmethod def 
_sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocessing.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = True uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def test_stderr_flush(self): # sys.stderr is flushed at process shutdown (issue #13812) if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) proc.start() proc.join() with open(testfn, 'r') as f: err = f.read() # The whole traceback was printed self.assertIn("ZeroDivisionError", err) self.assertIn("__init__.py", err) self.assertIn("1/0 # MARKER", err) @classmethod def _test_stderr_flush(cls, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) 1/0 # MARKER @classmethod def _test_sys_exit(cls, reason, testfn): fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL) sys.stderr = open(fd, 'w', closefd=False) sys.exit(reason) def test_sys_exit(self): # See Issue 13854 if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) testfn = test.support.TESTFN self.addCleanup(test.support.unlink, testfn) for reason in ( [1, 2, 3], 'ignore this', ): p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, 1) with open(testfn, 'r') as f: content 
= f.read() self.assertEqual(content.rstrip(), str(reason)) os.unlink(testfn) cases = [ ((True,), 1), ((False,), 0), ((8,), 8), ((None,), 0), ((), 0), ] for args, expected in cases: with self.subTest(args=args): p = self.Process(target=sys.exit, args=args) p.daemon = True p.start() join_process(p) self.assertEqual(p.exitcode, expected) # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): @classmethod def _test_put(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(pyqueue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() close_queue(queue) @classmethod def _test_get(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) 
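        # [Illustrative note, not part of the test suite -- a hedged summary.]  The
        # get() variants exercised above map onto the blocking modes of
        # multiprocess.Queue.get, roughly:
        #
        #     q.get()              # block until an item arrives
        #     q.get(True, 1.0)     # block at most ~1s, then raise pyqueue.Empty
        #     q.get(False)         # never block; raise pyqueue.Empty immediately
        #     q.get_nowait()       # same as q.get(False)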
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with test.support.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w') as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with test.support.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocessing.Queue() start = getattr(time,'monotonic',time.time)() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = getattr(time,'monotonic',time.time)() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once # failed because the delta was only 135.8 ms. self.assertGreaterEqual(delta, 0.100) close_queue(q) def test_queue_feeder_donot_stop_onexc(self): # bpo-30414: verify feeder handles exceptions correctly if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): def __reduce__(self): raise AttributeError with test.support.captured_stderr(): q = self.Queue() q.put(NotSerializable()) q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) close_queue(q) with test.support.captured_stderr(): # bpo-33078: verify that the queue size is correctly handled # on errors. 
q = self.Queue(maxsize=1) q.put(NotSerializable()) q.put(True) try: self.assertEqual(q.qsize(), 1) except NotImplementedError: # qsize is not available on all platform as it # relies on sem_getvalue pass self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Check that the size of the queue is correct self.assertTrue(q.empty()) close_queue(q) def test_queue_feeder_on_queue_feeder_error(self): # bpo-30006: verify feeder handles exceptions using the # _on_queue_feeder_error hook. if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class NotSerializable(object): """Mock unserializable object""" def __init__(self): self.reduce_was_called = False self.on_queue_feeder_error_was_called = False def __reduce__(self): self.reduce_was_called = True raise AttributeError class SafeQueue(multiprocessing.queues.Queue): """Queue with overloaded _on_queue_feeder_error hook""" @staticmethod def _on_queue_feeder_error(e, obj): if (isinstance(e, AttributeError) and isinstance(obj, NotSerializable)): obj.on_queue_feeder_error_was_called = True not_serializable_obj = NotSerializable() # The captured_stderr reduces the noise in the test report with test.support.captured_stderr(): q = SafeQueue(ctx=multiprocessing.get_context()) q.put(not_serializable_obj) # Verify that q is still functioning correctly q.put(True) self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT)) # Assert that the serialization and the hook have been called correctly self.assertTrue(not_serializable_obj.reduce_was_called) self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called) def test_closed_queue_put_get_exceptions(self): for q in multiprocessing.Queue(), multiprocessing.JoinableQueue(): q.close() with self.assertRaisesRegex(ValueError, 'is closed'): q.put('foo') with self.assertRaisesRegex(ValueError, 'is closed'): q.get() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # 
self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): @classmethod def f(cls, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def assertReachesEventually(self, func, value): for i in range(10): try: if func() == value: break except NotImplementedError: break time.sleep(DELTA) time.sleep(DELTA) self.assertReturnsIfImplemented(value, func) def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() 
cond.notify_all() cond.release() # check they have all woken self.assertReachesEventually(lambda: get_value(woken), 6) # check state is not mucked up self.check_invariant(cond) def test_notify_n(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake some of them up cond.acquire() cond.notify(n=2) cond.release() # check 2 have woken self.assertReachesEventually(lambda: get_value(woken), 2) # wake the rest of them cond.acquire() cond.notify(n=4) cond.release() self.assertReachesEventually(lambda: get_value(woken), 6) # doesn't do anything more cond.acquire() cond.notify(n=3) cond.release() self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) @classmethod def _test_waitfor_f(cls, cond, state): with cond: state.value = 0 cond.notify() result = cond.wait_for(lambda : state.value==4) if not result or state.value != 4: sys.exit(1) @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', -1) p = self.Process(target=self._test_waitfor_f, args=(cond, state)) p.daemon = True p.start() with cond: result = cond.wait_for(lambda : state.value==0) self.assertTrue(result) self.assertEqual(state.value, 0) for i in range(4): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertEqual(p.exitcode, 0) @classmethod def _test_waitfor_timeout_f(cls, cond, state, success, sem): sem.release() with cond: expected = 0.1 dt = getattr(time,'monotonic',time.time)() result = cond.wait_for(lambda : state.value==4, timeout=expected) dt = getattr(time,'monotonic',time.time)() - dt # borrow logic in assertTimeout() from test/lock_tests.py if not result and expected * 0.6 < dt < expected * 10.0: success.value = True @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') def test_waitfor_timeout(self): # based on test in test/lock_tests.py cond = self.Condition() state = self.Value('i', 0) success = self.Value('i', False) sem = self.Semaphore(0) p = self.Process(target=self._test_waitfor_timeout_f, args=(cond, state, success, sem)) p.daemon = True p.start() self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT)) # Only increment 3 times, so state == 4 is never reached. 
for i in range(3): time.sleep(0.01) with cond: state.value += 1 cond.notify() join_process(p) self.assertTrue(success.value) @classmethod def _test_wait_result(cls, c, pid): with c: c.notify() time.sleep(1) if pid is not None: os.kill(pid, signal.SIGINT) def test_wait_result(self): if isinstance(self, ProcessesMixin) and sys.platform != 'win32': pid = os.getpid() else: pid = None c = self.Condition() with c: self.assertFalse(c.wait(0)) self.assertFalse(c.wait(0.1)) p = self.Process(target=self._test_wait_result, args=(c, pid)) p.start() self.assertTrue(c.wait(60)) if pid is not None: self.assertRaises(KeyboardInterrupt, c.wait, 60) p.join() class _TestEvent(BaseTestCase): @classmethod def _test_event(cls, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporarily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) p = self.Process(target=self._test_event, args=(event,)) p.daemon = True p.start() self.assertEqual(wait(), True) p.join() # # Tests for Barrier - adapted from tests in test/lock_tests.py # # Many of the tests for threading.Barrier use a list as an atomic # counter: a value is appended to increment the counter, and the # length of the list gives the value. We use the class DummyList # for the same purpose. class _DummyList(object): def __init__(self): wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) lock = multiprocessing.Lock() self.__setstate__((wrapper, lock)) self._lengthbuf[0] = 0 def __setstate__(self, state): (self._wrapper, self._lock) = state self._lengthbuf = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._wrapper, self._lock) def append(self, _): with self._lock: self._lengthbuf[0] += 1 def __len__(self): with self._lock: return self._lengthbuf[0] def _wait(): # A crude wait/yield function not relying on synchronization primitives. time.sleep(0.01) class Bunch(object): """ A bunch of threads. """ def __init__(self, namespace, f, args, n, wait_before_exit=False): """ Construct a bunch of `n` threads running the same function `f`. If `wait_before_exit` is True, the threads won't terminate until do_finish() is called. 
""" self.f = f self.args = args self.n = n self.started = namespace.DummyList() self.finished = namespace.DummyList() self._can_exit = namespace.Event() if not wait_before_exit: self._can_exit.set() threads = [] for i in range(n): p = namespace.Process(target=self.task) p.daemon = True p.start() threads.append(p) def finalize(threads): for p in threads: p.join() self._finalizer = weakref.finalize(self, finalize, threads) def task(self): pid = os.getpid() self.started.append(pid) try: self.f(*self.args) finally: self.finished.append(pid) self._can_exit.wait(30) assert self._can_exit.is_set() def wait_for_started(self): while len(self.started) < self.n: _wait() def wait_for_finished(self): while len(self.finished) < self.n: _wait() def do_finish(self): self._can_exit.set() def close(self): self._finalizer() class AppendTrue(object): def __init__(self, obj): self.obj = obj def __call__(self): self.obj.append(True) class _TestBarrier(BaseTestCase): """ Tests for Barrier objects. """ N = 5 defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout def setUp(self): self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) def tearDown(self): self.barrier.abort() self.barrier = None def DummyList(self): if self.TYPE == 'threads': return [] elif self.TYPE == 'manager': return self.manager.list() else: return _DummyList() def run_threads(self, f, args): b = Bunch(self, f, args, self.N-1) try: f(*args) b.wait_for_finished() finally: b.close() @classmethod def multipass(cls, barrier, results, n): m = barrier.parties assert m == cls.N for i in range(n): results[0].append(True) assert len(results[1]) == i * m barrier.wait() results[1].append(True) assert len(results[0]) == (i + 1) * m barrier.wait() try: assert barrier.n_waiting == 0 except NotImplementedError: pass assert not barrier.broken def test_barrier(self, passes=1): """ Test that a barrier is passed in lockstep """ results = [self.DummyList(), self.DummyList()] self.run_threads(self.multipass, (self.barrier, results, passes)) def test_barrier_10(self): """ Test that a barrier works for 10 consecutive runs """ return self.test_barrier(10) @classmethod def _test_wait_return_f(cls, barrier, queue): res = barrier.wait() queue.put(res) def test_wait_return(self): """ test the return value from barrier.wait """ queue = self.Queue() self.run_threads(self._test_wait_return_f, (self.barrier, queue)) results = [queue.get() for i in range(self.N)] self.assertEqual(results.count(0), 1) close_queue(queue) @classmethod def _test_action_f(cls, barrier, results): barrier.wait() if len(results) != 1: raise RuntimeError def test_action(self): """ Test the 'action' callback """ results = self.DummyList() barrier = self.Barrier(self.N, action=AppendTrue(results)) self.run_threads(self._test_action_f, (barrier, results)) self.assertEqual(len(results), 1) @classmethod def _test_abort_f(cls, barrier, results1, results2): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() def test_abort(self): """ Test that an abort will put the barrier in a broken state """ results1 = self.DummyList() results2 = self.DummyList() self.run_threads(self._test_abort_f, (self.barrier, results1, results2)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertTrue(self.barrier.broken) @classmethod def _test_reset_f(cls, barrier, results1, results2, results3): i = barrier.wait() if i == 
cls.N//2: # Wait until the other threads are all in the barrier. while barrier.n_waiting < cls.N-1: time.sleep(0.001) barrier.reset() else: try: barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) # Now, pass the barrier again barrier.wait() results3.append(True) def test_reset(self): """ Test that a 'reset' on a barrier frees the waiting threads """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() self.run_threads(self._test_reset_f, (self.barrier, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_abort_and_reset_f(cls, barrier, barrier2, results1, results2, results3): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError barrier.wait() results1.append(True) except threading.BrokenBarrierError: results2.append(True) except RuntimeError: barrier.abort() # Synchronize and reset the barrier. Must synchronize first so # that everyone has left it when we reset, and after so that no # one enters it before the reset. if barrier2.wait() == cls.N//2: barrier.reset() barrier2.wait() barrier.wait() results3.append(True) def test_abort_and_reset(self): """ Test that a barrier can be reset after being broken. """ results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() barrier2 = self.Barrier(self.N) self.run_threads(self._test_abort_and_reset_f, (self.barrier, barrier2, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_timeout_f(cls, barrier, results): i = barrier.wait() if i == cls.N//2: # One thread is late! time.sleep(1.0) try: barrier.wait(0.5) except threading.BrokenBarrierError: results.append(True) @unittest.skipIf(True, 'bad timeout in pypy3') def test_timeout(self): """ Test wait(timeout) """ results = self.DummyList() self.run_threads(self._test_timeout_f, (self.barrier, results)) self.assertEqual(len(results), self.barrier.parties) @classmethod def _test_default_timeout_f(cls, barrier, results): i = barrier.wait(cls.defaultTimeout) if i == cls.N//2: # One thread is later than the default timeout time.sleep(1.0) try: barrier.wait() except threading.BrokenBarrierError: results.append(True) @unittest.skipIf(True, 'bad timeout in pypy3') def test_default_timeout(self): """ Test the barrier's default timeout """ barrier = self.Barrier(self.N, timeout=0.5) results = self.DummyList() self.run_threads(self._test_default_timeout_f, (barrier, results)) self.assertEqual(len(results), barrier.parties) def test_single_thread(self): b = self.Barrier(1) b.wait() b.wait() @classmethod def _test_thousand_f(cls, barrier, passes, conn, lock): for i in range(passes): barrier.wait() with lock: conn.send(i) def test_thousand(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) passes = 1000 lock = self.Lock() conn, child_conn = self.Pipe(False) for j in range(self.N): p = self.Process(target=self._test_thousand_f, args=(self.barrier, passes, child_conn, lock)) p.start() self.addCleanup(p.join) for i in range(passes): for j in range(self.N): self.assertEqual(conn.recv(), i) # # # class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('q', 2 ** 33, 2 ** 34), ('c', latin('x'), latin('y')) ] def setUp(self): if not HAS_SHAREDCTYPES: 
self.skipTest("requires multiprocess.sharedctypes") @classmethod def _test(cls, values): for sv, cv in zip(values, cls.codes_values): sv.value = cv[2] def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.daemon = True proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) def test_rawvalue(self): self.test_value(raw=True) def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() obj1 = val1.get_obj() val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() obj2 = val2.get_obj() lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() obj3 = val3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def f(cls, seq): for i in range(1, len(seq)): seq[i] += seq[i-1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.daemon = True p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_array_from_size(self): size = 10 # Test for zeroing (see issue #11675). # The repetition below strengthens the test by increasing the chances # of previously allocated non-zero memory being used for the new array # on the 2nd and 3rd loops. 
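        # [Illustrative note, not part of the test suite -- a hedged summary.]  An
        # integer size argument allocates zero-initialised storage, while a sequence
        # initialises the array with its contents, roughly:
        #
        #     arr = self.Array('i', 10)        # -> list(arr) == [0] * 10
        #     arr = self.Array('i', range(5))  # -> list(arr) == [0, 1, 2, 3, 4]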
for _ in range(3): arr = self.Array('i', size) self.assertEqual(len(arr), size) self.assertEqual(list(arr), [0] * size) arr[:] = range(10) self.assertEqual(list(arr), list(range(10))) del arr @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', list(range(10))) lock1 = arr1.get_lock() obj1 = arr1.get_obj() arr2 = self.Array('i', list(range(10)), lock=None) lock2 = arr2.get_lock() obj2 = arr2.get_obj() lock = self.Lock() arr3 = self.Array('i', list(range(10)), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) # # # class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(list(range(10))) self.assertEqual(a[:], list(range(10))) b = self.list() self.assertEqual(b[:], []) b.extend(list(range(5))) self.assertEqual(b[:], list(range(5))) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2,3,4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], list(range(10))) d = [a, b] e = self.list(d) self.assertEqual( [element[:] for element in e], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) def test_list_iter(self): a = self.list(list(range(10))) it = iter(a) self.assertEqual(list(it), list(range(10))) self.assertEqual(list(it), []) # exhausted # list modified during iteration it = iter(a) a[0] = 100 self.assertEqual(next(it), 100) def test_list_proxy_in_list(self): a = self.list([self.list(range(3)) for _i in range(3)]) self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) a[0][-1] = 55 self.assertEqual(a[0][:], [0, 1, 55]) for i in range(1, 3): self.assertEqual(a[i][:], [0, 1, 2]) self.assertEqual(a[1].pop(), 2) self.assertEqual(len(a[1]), 2) for i in range(0, 3, 2): self.assertEqual(len(a[i]), 3) del a b = self.list() b.append(b) del b def test_dict(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) def test_dict_iter(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) it = iter(d) self.assertEqual(list(it), indices) self.assertEqual(list(it), []) # exhausted # dictionary changed size during iteration it = iter(d) d.clear() self.assertRaises(RuntimeError, next, it) def test_dict_proxy_nested(self): pets = self.dict(ferrets=2, hamsters=4) supplies = self.dict(water=10, feed=3) d = self.dict(pets=pets, supplies=supplies) self.assertEqual(supplies['water'], 10) self.assertEqual(d['supplies']['water'], 10) d['supplies']['blankets'] = 5 self.assertEqual(supplies['blankets'], 5) self.assertEqual(d['supplies']['blankets'], 5) d['supplies']['water'] = 7 self.assertEqual(supplies['water'], 7) 
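        # [Illustrative note, not part of the test suite -- a hedged sketch.]  The
        # nesting exercised here relies on manager proxies being storable inside
        # other proxies, roughly:
        #
        #     m = multiprocessing.Manager()
        #     pets = m.dict(ferrets=2)
        #     d = m.dict(pets=pets)        # a proxy stored inside another proxy
        #     d['pets']['ferrets'] = 3     # the update is visible through both handles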
self.assertEqual(d['supplies']['water'], 7) del pets del supplies self.assertEqual(d['pets']['ferrets'], 2) d['supplies']['blankets'] = 11 self.assertEqual(d['supplies']['blankets'], 11) pets = d['pets'] supplies = d['supplies'] supplies['water'] = 7 self.assertEqual(supplies['water'], 7) self.assertEqual(d['supplies']['water'], 7) d.clear() self.assertEqual(len(d), 0) self.assertEqual(supplies['water'], 7) self.assertEqual(pets['hamsters'], 4) l = self.list([pets, supplies]) l[0]['marmots'] = 1 self.assertEqual(pets['marmots'], 1) self.assertEqual(l[0]['marmots'], 1) del pets del supplies self.assertEqual(l[0]['marmots'], 1) outer = self.list([[88, 99], l]) self.assertIsInstance(outer[0], list) # Not a ListProxy self.assertEqual(outer[-1][-1]['feed'], 3) def test_nested_queue(self): a = self.list() # Test queue inside list a.append(self.Queue()) a[0].put(123) self.assertEqual(a[0].get(), 123) b = self.dict() # Test queue inside dict b[0] = self.Queue() b[0].put(456) self.assertEqual(b[0].get(), 456) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) # # # def sqr(x, wait=0.0): time.sleep(wait) return x*x def mul(x, y): return x*y def raise_large_valuerror(wait): time.sleep(wait) raise ValueError("x" * 1024**2) def identity(x): return x class CountedObject(object): n_instances = 0 def __new__(cls): cls.n_instances += 1 return object.__new__(cls) def __del__(self): type(self).n_instances -= 1 class SayWhenError(ValueError): pass def exception_throwing_generator(total, when): if when == -1: raise SayWhenError("Somebody said when") for i in range(total): if i == when: raise SayWhenError("Somebody said when") yield i class _TestPool(BaseTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.pool = cls.Pool(4) @classmethod def tearDownClass(cls): cls.pool.terminate() cls.pool.join() cls.pool = None super().tearDownClass() def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), list(map(sqr, list(range(100))))) def test_starmap(self): psmap = self.pool.starmap tuples = list(zip(range(10), range(9,-1, -1))) self.assertEqual(psmap(mul, tuples), list(itertools.starmap(mul, tuples))) tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(psmap(mul, tuples, chunksize=20), list(itertools.starmap(mul, tuples))) def test_starmap_async(self): tuples = list(zip(range(100), range(99,-1, -1))) self.assertEqual(self.pool.starmap_async(mul, tuples).get(), list(itertools.starmap(mul, tuples))) def test_map_async(self): self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), list(map(sqr, list(range(10))))) def test_map_async_callbacks(self): call_args = self.manager.list() if self.TYPE == 'manager' else [] self.pool.map_async(int, ['1'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(1, len(call_args)) self.assertEqual([1], call_args[0]) self.pool.map_async(int, ['a'], callback=call_args.append, error_callback=call_args.append).wait() self.assertEqual(2, len(call_args)) self.assertIsInstance(call_args[1], ValueError) def 
test_map_unplicklable(self): # Issue #19425 -- failure to pickle should not cause a hang if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) class A(object): def __reduce__(self): raise RuntimeError('cannot pickle') with self.assertRaises(RuntimeError): self.pool.map(sqr, [A()]*10) def test_map_chunksize(self): try: self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) except multiprocessing.TimeoutError: self.fail("pool.map_async with chunksize stalled on null list") def test_map_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) # again, make sure it's reentrant with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(10, 3), 1) class SpecialIterable: def __iter__(self): return self def __next__(self): raise SayWhenError def __len__(self): return 1 with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) get = TimingWrapper(res.get) self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, list(range(10))) self.assertEqual(list(it), list(map(sqr, list(range(10))))) it = self.pool.imap(sqr, list(range(10))) for i in range(10): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) it = self.pool.imap(sqr, list(range(1000)), chunksize=100) for i in range(1000): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) def test_imap_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) for i in range(3): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) # SayWhenError seen at start of problematic chunk's results it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) for i in range(6): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) for i in range(4): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, list(range(10))) self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) def test_imap_unordered_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for 
{}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap_unordered(sqr, exception_throwing_generator(10, 3), 1) expected_values = list(map(sqr, list(range(10)))) with self.assertRaises(SayWhenError): # imap_unordered makes it difficult to anticipate the SayWhenError for i in range(10): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) it = self.pool.imap_unordered(sqr, exception_throwing_generator(20, 7), 2) expected_values = list(map(sqr, list(range(20)))) with self.assertRaises(SayWhenError): for i in range(20): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) def test_make_pool(self): expected_error = (RemoteError if self.TYPE == 'manager' else ValueError) self.assertRaises(expected_error, self.Pool, -1) self.assertRaises(expected_error, self.Pool, 0) if self.TYPE != 'manager': p = self.Pool(3) try: self.assertEqual(3, len(p._pool)) finally: p.close() p.join() def test_terminate(self): result = self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() # Sanity check the pool didn't wait for all tasks to finish self.assertLess(join.elapsed, 2.0) def test_empty_iterable(self): # See Issue 12157 p = self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) p.close() p.join() def test_context(self): if self.TYPE == 'processes': L = list(range(10)) expected = [sqr(i) for i in L] with self.Pool(2) as p: r = p.map_async(sqr, L) self.assertEqual(r.get(), expected) p.join() self.assertRaises(ValueError, p.map_async, sqr, L) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_traceback(self): # We want ensure that the traceback from the child process is # contained in the traceback raised in the main process. 
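# (Sketch of the mechanism this test relies on, as exercised by the assertions
# below: the worker formats the child-side traceback and sends it back with the
# exception; when the parent re-raises, the child traceback text is chained on
# as exc.__cause__, a multiprocessing.pool.RemoteTraceback whose .tb attribute
# holds the formatted frames, which sys.excepthook then prints.)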
if self.TYPE == 'processes': with self.Pool(1) as p: try: p.apply(self._test_traceback) except Exception as e: exc = e else: self.fail('expected RuntimeError') p.join() self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with test.support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) # _helper_reraises_exception should not make the error # a remote exception with self.Pool(1) as p: try: p.map(sqr, exception_throwing_generator(1, -1), 1) except Exception as e: exc = e else: self.fail('expected SayWhenError') self.assertIs(type(exc), SayWhenError) self.assertIs(exc.__cause__, None) p.join() @classmethod def _test_wrapped_exception(cls): raise RuntimeError('foo') @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_wrapped_exception(self): # Issue #20980: Should not wrap exception when using thread pool with self.Pool(1) as p: with self.assertRaises(RuntimeError): p.apply(self._test_wrapped_exception) p.join() def test_map_no_failfast(self): # Issue #23992: the fail-fast behaviour when an exception is raised # during map() would make Pool.join() deadlock, because a worker # process would fill the result queue (after the result handler thread # terminated, hence not draining it anymore). t_start = getattr(time,'monotonic',time.time)() with self.assertRaises(ValueError): with self.Pool(2) as p: try: p.map(raise_large_valuerror, [0, 1]) finally: time.sleep(0.5) p.close() p.join() # check that we indeed waited for all jobs self.assertGreater(getattr(time,'monotonic',time.time)() - t_start, 0.9) def test_release_task_refs(self): # Issue #29861: task arguments and results should not be kept # alive after we are done with them. objs = [CountedObject() for i in range(10)] refs = [weakref.ref(o) for o in objs] self.pool.map(identity, objs) del objs for i in range(3): gc.collect() time.sleep(DELTA) # let threaded cleanup code run self.assertEqual(set(wr() for wr in refs), {None}) # With a process pool, copies of the objects are returned, check # they were released too. 
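# (Note: pool.map() returns unpickled copies in the parent, and unpickling
# calls CountedObject.__new__, so those copies also bump n_instances; once the
# unreferenced result list is collected, __del__ should bring the parent's
# counter back to zero, which is what the next assertion checks.)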
self.assertEqual(CountedObject.n_instances, 0) def test_enter(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) with pool: pass # call pool.terminate() # pool is no longer running with self.assertRaises(ValueError): # bpo-35477: pool.__enter__() fails if the pool is not running with pool: pass pool.join() def test_resource_warning(self): if self.TYPE == 'manager': self.skipTest("test not applicable to manager") pool = self.Pool(1) pool.terminate() pool.join() # force state to RUN to emit ResourceWarning in __del__() pool._state = multiprocessing.pool.RUN with support.check_warnings(('unclosed running multiprocessing pool', ResourceWarning)): pool = None support.gc_collect() def raising(): raise KeyError("key") def unpickleable_result(): return lambda: 42 class _TestPoolWorkerErrors(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_async_error_callback(self): p = multiprocessing.Pool(2) scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(raising, error_callback=errback) self.assertRaises(KeyError, res.get) self.assertTrue(scratchpad[0]) self.assertIsInstance(scratchpad[0], KeyError) p.close() p.join() def _test_unpickleable_result(self): from multiprocess.pool import MaybeEncodingError p = multiprocessing.Pool(2) # Make sure we don't lose pool processes because of encoding errors. for iteration in range(20): scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(unpickleable_result, error_callback=errback) self.assertRaises(MaybeEncodingError, res.get) wrapped = scratchpad[0] self.assertTrue(wrapped) self.assertIsInstance(scratchpad[0], MaybeEncodingError) self.assertIsNotNone(wrapped.exc) self.assertIsNotNone(wrapped.value) p.close() p.join() class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): sm = multiprocessing.get_start_method() if sm == 'fork' and sys.implementation.name == 'pypy': self.skipTest("race condition on PyPy") p = multiprocessing.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive # (countdown * DELTA = 5 seconds max startup process time) countdown = 50 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [w.pid for w in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() def test_pool_worker_lifetime_early_close(self): # Issue #10332: closing a pool whose workers have limited lifetimes # before all the tasks completed would make join() hang. 
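# (Roughly: with maxtasksperchild=1 each worker exits after one task and has to
# be respawned, so the pool's handler threads must keep repopulating workers
# even while the pool is closing; before the fix, close() could stop that too
# early, leaving queued tasks unrun and making join() hang.)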
p = multiprocessing.Pool(3, maxtasksperchild=1) results = [] for i in range(6): results.append(p.apply_async(sqr, (i, 0.3))) p.close() p.join() # check the results for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) def test_worker_finalization_via_atexit_handler_of_multiprocessing(self): # tests cases against bpo-38744 and bpo-39360 cmd = '''if 1: from multiprocess import Pool problem = None class A: def __init__(self): self.pool = Pool(processes=1) def test(): global problem problem = A() problem.pool.map(float, tuple(range(10))) if __name__ == "__main__": test() ''' rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) self.assertEqual(rc, 0) # # Test of creating a customized manager class # from multiprocess.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in range(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('__next__',) def __iter__(self): return self def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() self.common(manager) manager.shutdown() # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context(self): with MyManager() as manager: self.common(manager) # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop, # which happens on slow buildbots. 
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context_prestarted(self): manager = MyManager() manager.start() with manager: self.common(manager) self.assertEqual(manager._process.exitcode, 0) def common(self, manager): foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = pyqueue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) values = ['hello world', None, True, 2.25, 'hall\xe5 v\xe4rlden', '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', b'hall\xe5 v\xe4rlden', ] result = values[:] @classmethod def _putter(cls, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple queue.put(tuple(cls.values)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER ) manager.start() self.addCleanup(manager.shutdown) p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() self.assertEqual(queue.get(), self.result) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. # Changed on PyPy: passing functions to xmlrpc is broken #self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue @hashlib_helper.requires_hashdigest('md5') class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER) try: srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. 
srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() p.join() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue finally: if hasattr(manager, "shutdown"): manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) try: manager.start() self.addCleanup(manager.shutdown) except OSError as e: if e.errno != errno.EADDRINUSE: raise # Retry after some time, in case the old socket was lingering # (sporadic failure on buildbots) time.sleep(1.0) manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) if hasattr(manager, "shutdown"): self.addCleanup(manager.shutdown) # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', list(range(4))) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocessing.BufferTooShort as e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(-1), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) time.sleep(.1) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(OSError, reader.send, 2) self.assertRaises(OSError, writer.recv) self.assertRaises(OSError, writer.poll) def test_spawn_close(self): # We 
test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) reduction.send_handle(conn, fd, p.pid) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(test.support.unlink, test.support.TESTFN) with open(test.support.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(test.support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") 
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) @unittest.skipUnless(util.abstract_sockets_supported, "test needs abstract socket support") def test_abstract_socket(self): with self.connection.Listener("\0something") as listener: with self.connection.Client(listener.address) as client: with listener.accept() as d: client.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, listener.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. 
conn = l.accept() self.assertEqual(conn.recv(), 'hello') conn.close() p.join() l.close() def test_issue16955(self): for fam in self.connection.families: l = self.connection.Listener(family=fam) c = self.connection.Client(l.address) a = l.accept() a.send_bytes(b"hello") self.assertTrue(c.poll(1)) a.close() c.close() l.close() class _TestPoll(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_empty_string(self): a, b = self.Pipe() self.assertEqual(a.poll(), False) b.send_bytes(b'') self.assertEqual(a.poll(), True) self.assertEqual(a.poll(), True) @classmethod def _child_strings(cls, conn, strings): for s in strings: time.sleep(0.1) conn.send_bytes(s) conn.close() def test_strings(self): strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') a, b = self.Pipe() p = self.Process(target=self._child_strings, args=(b, strings)) p.start() for s in strings: for i in range(200): if a.poll(0.01): break x = a.recv_bytes() self.assertEqual(s, x) p.join() @classmethod def _child_boundaries(cls, r): # Polling may "pull" a message in to the child process, but we # don't want it to pull only part of a message, as that would # corrupt the pipe for any other processes which might later # read from it. r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") @hashlib_helper.requires_hashdigest('md5') class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=support.LONG_TIMEOUT) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.create_server((socket_helper.HOST, 0)) conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = 
lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): super().setUp() # Make pristine heap for these tests self.old_heap = multiprocessing.heap.BufferWrapper._heap multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap() def tearDown(self): multiprocessing.heap.BufferWrapper._heap = self.old_heap super().tearDown() def _test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap heap._DISCARD_FREE_SPACE_LARGER_THAN = 0 # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] del b # verify the state of the heap with heap._lock: all = [] free = 0 occupied = 0 for L in list(heap._len_to_seq.values()): # count all free blocks in arenas for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) free += (stop-start) for arena, arena_blocks in heap._allocated_blocks.items(): # count all allocated blocks in arenas for start, stop in arena_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) self.assertEqual(free + occupied, sum(arena.size for arena in heap._arenas)) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] if arena != narena: # Two different arenas self.assertEqual(stop, heap._arenas[arena].size) # last block self.assertEqual(nstart, 0) # first block else: # Same arena: two adjacent blocks self.assertEqual(stop, nstart) # test free'ing all blocks random.shuffle(blocks) while blocks: blocks.pop() support.gc_collect() # for PyPy and other GCs self.assertEqual(heap._n_frees, heap._n_mallocs) self.assertEqual(len(heap._pending_free_blocks), 0) self.assertEqual(len(heap._arenas), 0) self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks) self.assertEqual(len(heap._len_to_seq), 0) @test.support.cpython_only def test_free_from_gc(self): # Check that 
freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). # Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocessing.heap.BufferWrapper(1) b = multiprocessing.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double), ('z', c_longlong,) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, z, foo, arr, string): x.value *= 2 y.value *= 2 z.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) z = Value(c_longlong, 2 ** 33, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, z, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(z.value, 2 ** 34) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0, 2 ** 33) bar = copy(foo) foo.x = 0 foo.y = 0 foo.z = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) self.assertEqual(bar.z, 2 ** 33) @unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory") @hashlib_helper.requires_hashdigest('md5') class _TestSharedMemory(BaseTestCase): ALLOWED_TYPES = ('processes',) @staticmethod def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data): if isinstance(shmem_name_or_obj, str): local_sms = shared_memory.SharedMemory(shmem_name_or_obj) else: local_sms = shmem_name_or_obj local_sms.buf[:len(binary_data)] = binary_data local_sms.close() def _new_shm_name(self, prefix): # Add a PID to the name of a POSIX shared memory object to allow # running multiprocess tests (test_multiprocessing_fork, # test_multiprocessing_spawn, etc) in parallel. return prefix + str(os.getpid()) @unittest.skipIf(sys.platform == "win32", "test is broken on Windows") @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_shared_memory_basics(self): name_tsmb = self._new_shm_name('test01_tsmb') sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512) self.addCleanup(sms.unlink) # Verify attributes are readable. self.assertEqual(sms.name, name_tsmb) self.assertGreaterEqual(sms.size, 512) self.assertGreaterEqual(len(sms.buf), sms.size) # Modify contents of shared memory segment through memoryview. sms.buf[0] = 42 self.assertEqual(sms.buf[0], 42) # Attach to existing shared memory segment. also_sms = shared_memory.SharedMemory(name_tsmb) self.assertEqual(also_sms.buf[0], 42) also_sms.close() # Attach to existing shared memory segment but specify a new size. 
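# (Illustrative note: when attaching to an existing block, i.e. create=False,
# the requested size is ignored and the mapping keeps the size of the existing
# segment; the assertion that follows relies on exactly that behaviour.)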
same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size) self.assertLess(same_sms.size, 20*sms.size) # Size was ignored. same_sms.close() # Creating Shared Memory Segment with -ve size with self.assertRaises(ValueError): shared_memory.SharedMemory(create=True, size=-2) # Attaching Shared Memory Segment without a name with self.assertRaises(ValueError): shared_memory.SharedMemory(create=False) # Test if shared memory segment is created properly, # when _make_filename returns an existing shared memory segment name with unittest.mock.patch( 'multiprocessing.shared_memory._make_filename') as mock_make_filename: NAME_PREFIX = shared_memory._SHM_NAME_PREFIX names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')] # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary # because some POSIX compliant systems require name to start with / names = [NAME_PREFIX + name for name in names] mock_make_filename.side_effect = names shm1 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm1.unlink) self.assertEqual(shm1._name, names[0]) mock_make_filename.side_effect = names shm2 = shared_memory.SharedMemory(create=True, size=1) self.addCleanup(shm2.unlink) self.assertEqual(shm2._name, names[1]) if shared_memory._USE_POSIX: # Posix Shared Memory can only be unlinked once. Here we # test an implementation detail that is not observed across # all supported platforms (since WindowsNamedSharedMemory # manages unlinking on its own and unlink() does nothing). # True release of shared memory segment does not necessarily # happen until process exits, depending on the OS platform. name_dblunlink = self._new_shm_name('test01_dblunlink') sms_uno = shared_memory.SharedMemory( name_dblunlink, create=True, size=5000 ) with self.assertRaises(FileNotFoundError): try: self.assertGreaterEqual(sms_uno.size, 5000) sms_duo = shared_memory.SharedMemory(name_dblunlink) sms_duo.unlink() # First shm_unlink() call. sms_duo.close() sms_uno.close() finally: sms_uno.unlink() # A second shm_unlink() call is bad. with self.assertRaises(FileExistsError): # Attempting to create a new shared memory segment with a # name that is already in use triggers an exception. there_can_only_be_one_sms = shared_memory.SharedMemory( name_tsmb, create=True, size=512 ) if shared_memory._USE_POSIX: # Requesting creation of a shared memory segment with the option # to attach to an existing segment, if that name is currently in # use, should not trigger an exception. # Note: Using a smaller size could possibly cause truncation of # the existing segment but is OS platform dependent. In the # case of MacOS/darwin, requesting a smaller size is disallowed. class OptionalAttachSharedMemory(shared_memory.SharedMemory): _flags = os.O_CREAT | os.O_RDWR ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb) self.assertEqual(ok_if_exists_sms.size, sms.size) ok_if_exists_sms.close() # Attempting to attach to an existing shared memory segment when # no segment exists with the supplied name triggers an exception. with self.assertRaises(FileNotFoundError): nonexisting_sms = shared_memory.SharedMemory('test01_notthere') nonexisting_sms.unlink() # Error should occur on prior line. 
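# A minimal usage sketch (illustration only; the variable name is hypothetical)
# of the close()/unlink() split the cleanup code below depends on: close()
# merely unmaps the block in this process, unlink() removes the named segment
# system-wide on POSIX:
#     seg = shared_memory.SharedMemory(create=True, size=64)  # create and map
#     seg.buf[0] = 1                                          # write via the memoryview
#     seg.close()     # detach locally only
#     seg.unlink()    # remove the backing segment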
sms.close() # Test creating a shared memory segment with negative size with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=-1) # Test creating a shared memory segment with size 0 with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True, size=0) # Test creating a shared memory segment without size argument with self.assertRaises(ValueError): sms_invalid = shared_memory.SharedMemory(create=True) def test_shared_memory_across_processes(self): # bpo-40135: don't define shared memory block's name in case of # the failure when we run multiprocessing tests in parallel. sms = shared_memory.SharedMemory(create=True, size=512) self.addCleanup(sms.unlink) # Verify remote attachment to existing block by name is working. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms.name, b'howdy') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'howdy') # Verify pickling of SharedMemory instance also works. p = self.Process( target=self._attach_existing_shmem_then_write, args=(sms, b'HELLO') ) p.daemon = True p.start() p.join() self.assertEqual(bytes(sms.buf[:5]), b'HELLO') sms.close() @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms") def test_shared_memory_SharedMemoryServer_ignores_sigint(self): # bpo-36368: protect SharedMemoryManager server process from # KeyboardInterrupt signals. smm = multiprocessing.managers.SharedMemoryManager() smm.start() # make sure the manager works properly at the beginning sl = smm.ShareableList(range(10)) # the manager's server should ignore KeyboardInterrupt signals, and # maintain its connection with the current process, and success when # asked to deliver memory segments. os.kill(smm._process.pid, signal.SIGINT) sl2 = smm.ShareableList(range(10)) # test that the custom signal handler registered in the Manager does # not affect signal handling in the parent process. with self.assertRaises(KeyboardInterrupt): os.kill(os.getpid(), signal.SIGINT) smm.shutdown() @unittest.skipIf(os.name != "posix", "resource_tracker is posix only") def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self): # bpo-36867: test that a SharedMemoryManager uses the # same resource_tracker process as its parent. cmd = '''if 1: from multiprocessing.managers import SharedMemoryManager smm = SharedMemoryManager() smm.start() sl = smm.ShareableList(range(10)) smm.shutdown() ''' #XXX: ensure correct resource_tracker rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd, **ENV) # Before bpo-36867 was fixed, a SharedMemoryManager not using the same # resource_tracker process as its parent would make the parent's # tracker complain about sl being leaked even though smm.shutdown() # properly released sl. 
self.assertFalse(err) def test_shared_memory_SharedMemoryManager_basics(self): smm1 = multiprocessing.managers.SharedMemoryManager() with self.assertRaises(ValueError): smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started smm1.start() lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ] lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ] doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name) self.assertEqual(len(doppleganger_list0), 5) doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name) self.assertGreaterEqual(len(doppleganger_shm0.buf), 32) held_name = lom[0].name smm1.shutdown() if sys.platform != "win32": # Calls to unlink() have no effect on Windows platform; shared # memory will only be released once final process exits. with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_shm = shared_memory.SharedMemory(name=held_name) with multiprocessing.managers.SharedMemoryManager() as smm2: sl = smm2.ShareableList("howdy") shm = smm2.SharedMemory(size=128) held_name = sl.shm.name if sys.platform != "win32": with self.assertRaises(FileNotFoundError): # No longer there to be attached to again. absent_sl = shared_memory.ShareableList(name=held_name) def test_shared_memory_ShareableList_basics(self): sl = shared_memory.ShareableList( ['howdy', b'HoWdY', -273.154, 100, None, True, 42] ) self.addCleanup(sl.shm.unlink) # Verify attributes are readable. self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q') # Exercise len(). self.assertEqual(len(sl), 7) # Exercise index(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') with self.assertRaises(ValueError): sl.index('100') self.assertEqual(sl.index(100), 3) # Exercise retrieving individual values. self.assertEqual(sl[0], 'howdy') self.assertEqual(sl[-2], True) # Exercise iterability. self.assertEqual( tuple(sl), ('howdy', b'HoWdY', -273.154, 100, None, True, 42) ) # Exercise modifying individual values. sl[3] = 42 self.assertEqual(sl[3], 42) sl[4] = 'some' # Change type at a given position. self.assertEqual(sl[4], 'some') self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[4] = 'far too many' self.assertEqual(sl[4], 'some') sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data self.assertEqual(sl[0], 'encodés') self.assertEqual(sl[1], b'HoWdY') # no spillage with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data self.assertEqual(sl[1], b'HoWdY') with self.assertRaisesRegex(ValueError, "exceeds available storage"): sl[1] = b'123456789' self.assertEqual(sl[1], b'HoWdY') # Exercise count(). with warnings.catch_warnings(): # Suppress BytesWarning when comparing against b'HoWdY'. warnings.simplefilter('ignore') self.assertEqual(sl.count(42), 2) self.assertEqual(sl.count(b'HoWdY'), 1) self.assertEqual(sl.count(b'adios'), 0) # Exercise creating a duplicate. name_duplicate = self._new_shm_name('test03_duplicate') sl_copy = shared_memory.ShareableList(sl, name=name_duplicate) try: self.assertNotEqual(sl.shm.name, sl_copy.shm.name) self.assertEqual(name_duplicate, sl_copy.shm.name) self.assertEqual(list(sl), list(sl_copy)) self.assertEqual(sl.format, sl_copy.format) sl_copy[-1] = 77 self.assertEqual(sl_copy[-1], 77) self.assertNotEqual(sl[-1], 77) sl_copy.shm.close() finally: sl_copy.shm.unlink() # Obtain a second handle on the same ShareableList. 
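# (Attaching by name maps the same underlying SharedMemory block, so the write
# made through sl_tethered below is immediately visible through sl.)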
sl_tethered = shared_memory.ShareableList(name=sl.shm.name) self.assertEqual(sl.shm.name, sl_tethered.shm.name) sl_tethered[-1] = 880 self.assertEqual(sl[-1], 880) sl_tethered.shm.close() sl.shm.close() # Exercise creating an empty ShareableList. empty_sl = shared_memory.ShareableList() try: self.assertEqual(len(empty_sl), 0) self.assertEqual(empty_sl.format, '') self.assertEqual(empty_sl.count('any'), 0) with self.assertRaises(ValueError): empty_sl.index(None) empty_sl.shm.close() finally: empty_sl.shm.unlink() def test_shared_memory_ShareableList_pickling(self): sl = shared_memory.ShareableList(range(10)) self.addCleanup(sl.shm.unlink) serialized_sl = pickle.dumps(sl) deserialized_sl = pickle.loads(serialized_sl) self.assertTrue( isinstance(deserialized_sl, shared_memory.ShareableList) ) self.assertTrue(deserialized_sl[-1], 9) self.assertFalse(sl is deserialized_sl) deserialized_sl[4] = "changed" self.assertEqual(sl[4], "changed") # Verify data is not being put into the pickled representation. name = 'a' * len(sl.shm.name) larger_sl = shared_memory.ShareableList(range(400)) self.addCleanup(larger_sl.shm.unlink) serialized_larger_sl = pickle.dumps(larger_sl) self.assertTrue(len(serialized_sl) == len(serialized_larger_sl)) larger_sl.shm.close() deserialized_sl.shm.close() sl.shm.close() def test_shared_memory_cleaned_after_process_termination(self): cmd = '''if 1: import os, time, sys from multiprocessing import shared_memory # Create a shared_memory segment, and send the segment name sm = shared_memory.SharedMemory(create=True, size=10) sys.stdout.write(sm.name + '\\n') sys.stdout.flush() time.sleep(100) ''' with subprocess.Popen([sys.executable, '-E', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: name = p.stdout.readline().strip().decode() # killing abruptly processes holding reference to a shared memory # segment should not leak the given memory segment. p.terminate() p.wait() deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT t = 0.1 while getattr(time,'monotonic',time.time)() < deadline: time.sleep(t) t = min(t*2, 5) try: smm = shared_memory.SharedMemory(name, create=False) except FileNotFoundError: break else: raise AssertionError("A SharedMemory segment was leaked after" " a process was abruptly terminated.") if os.name == 'posix': # Without this line it was raising warnings like: # UserWarning: resource_tracker: # There appear to be 1 leaked shared_memory # objects to clean up at shutdown # See: https://bugs.python.org/issue45209 resource_tracker.unregister(f"/{name}", "shared_memory") # A warning was emitted by the subprocess' own # resource_tracker (on Windows, shared memory segments # are released automatically by the OS). err = p.stderr.read().decode() self.assertIn( "resource_tracker: There appear to be 1 leaked " "shared_memory objects to clean up at shutdown", err) # # # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): for i in range(3): gc.collect() self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a gc.collect() # For PyPy or other GCs. 
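# (How the rest of this fixture is expected to behave: calling a Finalize
# object runs its callback once and unregisters it; at process exit,
# util._exit_function() runs only finalizers registered with an exitpriority,
# highest priority first and ties in reverse registration order, which is why
# 'c' never shows up and the expected order below is d10, d03, d02, d01, e.)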
b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called gc.collect() # For PyPy or other GCs. c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) @test.support.cpython_only def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with test.support.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocessing.__file__) pattern = os.path.join(glob.escape(folder), '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' 
+ m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() p.close() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() p.close() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocessing.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocessing.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = multiprocessing.connection.Connection(44977608) # check that poll() doesn't crash try: conn.poll() except (ValueError, OSError): pass finally: # Hack private attribute _handle to avoid printing an error # in conn.__del__ conn._handle = None 
self.assertRaises((ValueError, OSError), multiprocessing.connection.Connection, -1) @hashlib_helper.requires_hashdigest('md5') class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection.CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') # # Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 # def initializer(ns): ns.test += 1 @hashlib_helper.requires_hashdigest('md5') class TestInitializers(unittest.TestCase): def setUp(self): self.mgr = multiprocessing.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() self.mgr.join() def test_manager_initializer(self): m = multiprocessing.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() m.join() def test_pool_initializer(self): self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) p = multiprocessing.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocessing.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random()*0.1) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random()*0.1) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.create_server((socket_helper.HOST, 0)) addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocessing.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) def test_wait_timeout(self): from multiprocess.connection import wait expected = 5 a, b = multiprocessing.Pipe() start = getattr(time,'monotonic',time.time)() res = wait([a, b], expected) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(res, []) self.assertLess(delta, expected * 2) self.assertGreater(delta, expected * 0.5) b.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, b], 20) delta = 
getattr(time,'monotonic',time.time)() - start self.assertEqual(res, [a]) self.assertLess(delta, 0.4) @classmethod def signal_and_sleep(cls, sem, period): sem.release() time.sleep(period) def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocessing.Semaphore(0) a, b = multiprocessing.Pipe() p = multiprocessing.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) self.assertTrue(sem.acquire(timeout=20)) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], expected + 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = getattr(time,'monotonic',time.time)() res = wait([a, p.sentinel, b], 20) delta = getattr(time,'monotonic',time.time)() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocessing.Pipe() t = getattr(time,'monotonic',time.time)() res = wait([a], timeout=-1) t = getattr(time,'monotonic',time.time)() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls): import json r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json # start child process using unusual flags prog = ('from multiprocess.tests import TestFlags; ' + 'TestFlags.run_in_child()') data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocessing.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocessing.Pipe(duplex=True) l = multiprocessing.connection.Listener(family='AF_INET') p = multiprocessing.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() 
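# The accepted connection must still deliver the child's message even with the 0.1s default socket timeout in effect (see Issue #6056 above).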
self.assertEqual(conn.recv(), 456) conn.close() l.close() join_process(p) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocessing.get_start_method() name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = test.support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = test.support.script_helper.assert_python_ok(name, sm, **ENV) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. @classmethod def child(cls, n, conn): if n > 1: p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() join_process(p) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocessing.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocessing.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() join_process(p) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. 
fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocessing.Pipe() fd = self.get_high_socket_fd() try: p = multiprocessing.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() join_process(p) finally: self.close(fd) writer.close() reader.close() if multiprocessing.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocessing.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocessing.Pipe() try: p = multiprocessing.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocessing.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocessing.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, ctx.get_start_method()) def test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocessing.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def test_set_get(self): multiprocessing.set_forkserver_preload(PRELOAD) count 
= 0 old_method = multiprocessing.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocessing.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocessing.get_start_method(), method) ctx = multiprocessing.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocessing) count += 1 finally: multiprocessing.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocessing.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['spawn', 'fork'] or methods == ['fork', 'spawn', 'forkserver'] or methods == ['spawn', 'fork', 'forkserver']) def _test_preload_resources(self): if multiprocessing.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = test.support.script_helper.assert_python_ok(name, **ENV) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestResourceTracker(unittest.TestCase): def _test_resource_tracker(self): # # Check that killing process does not leak named semaphores # cmd = '''if 1: import time, os, tempfile import multiprocess as mp from multiprocess import resource_tracker from multiprocess.shared_memory import SharedMemory mp.set_start_method("spawn") rand = tempfile._RandomNameSequence() def create_and_register_resource(rtype): if rtype == "semaphore": lock = mp.Lock() return lock, lock._semlock.name elif rtype == "shared_memory": sm = SharedMemory(create=True, size=10) return sm, sm._name else: raise ValueError( "Resource type {{}} not understood".format(rtype)) resource1, rname1 = create_and_register_resource("{rtype}") resource2, rname2 = create_and_register_resource("{rtype}") os.write({w}, rname1.encode("ascii") + b"\\n") os.write({w}, rname2.encode("ascii") + b"\\n") time.sleep(10) ''' for rtype in resource_tracker._CLEANUP_FUNCS: with self.subTest(rtype=rtype): if rtype == "noop": # Artefact resource type used by the resource_tracker continue r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd.format(w=w, rtype=rtype)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _resource_unlink(name1, rtype) p.terminate() p.wait() deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT while getattr(time,'monotonic',time.time)() < deadline: time.sleep(.5) try: _resource_unlink(name2, rtype) except OSError as e: # docs say it should be ENOENT, but OSX seems to give # EINVAL self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL)) break else: raise AssertionError( f"A {rtype} resource was leaked after a process was " f"abruptly terminated.") err = p.stderr.read().decode('utf-8') p.stderr.close() expected = ('resource_tracker: There appear to be 2 leaked {} ' 'objects'.format( rtype)) self.assertRegex(err, expected) self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1) def check_resource_tracker_death(self, signum, 
should_die): # bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.resource_tracker import _resource_tracker pid = _resource_tracker._pid if pid is not None: os.kill(pid, signal.SIGKILL) support.wait_process(pid, exitcode=-signal.SIGKILL) with warnings.catch_warnings(): warnings.simplefilter("ignore") _resource_tracker.ensure_running() pid = _resource_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocessing.get_context("spawn") with warnings.catch_warnings(record=True) as all_warn: warnings.simplefilter("always") sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) if should_die: self.assertEqual(len(all_warn), 1) the_warn = all_warn[0] self.assertTrue(issubclass(the_warn.category, UserWarning)) self.assertTrue("resource_tracker: process died" in str(the_warn.message)) else: self.assertEqual(len(all_warn), 0) def test_resource_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGINT, False) def test_resource_tracker_sigterm(self): # Catchable signal (ignored by semaphore tracker) self.check_resource_tracker_death(signal.SIGTERM, False) def test_resource_tracker_sigkill(self): # Uncatchable signal. self.check_resource_tracker_death(signal.SIGKILL, True) @staticmethod def _is_resource_tracker_reused(conn, pid): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() # The pid should be None in the child process, expect for the fork # context. It should not be a new value. reused = _resource_tracker._pid in (None, pid) reused &= _resource_tracker._check_alive() conn.send(reused) def test_resource_tracker_reused(self): from multiprocess.resource_tracker import _resource_tracker _resource_tracker.ensure_running() pid = _resource_tracker._pid r, w = multiprocessing.Pipe(duplex=False) p = multiprocessing.Process(target=self._is_resource_tracker_reused, args=(w, pid)) p.start() is_resource_tracker_reused = r.recv() # Clean up p.join() w.close() r.close() self.assertTrue(is_resource_tracker_reused) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocessing.SimpleQueue() child_can_start = multiprocessing.Event() parent_can_continue = multiprocessing.Event() proc = multiprocessing.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() def test_close(self): queue = multiprocessing.SimpleQueue() queue.close() # closing a queue twice should not fail queue.close() # Test specific to CPython since it tests private attributes @test.support.cpython_only def test_closed(self): queue = multiprocessing.SimpleQueue() queue.close() self.assertTrue(queue._reader.closed) self.assertTrue(queue._writer.closed) class TestPoolNotLeakOnFailure(unittest.TestCase): def test_release_unused_processes(self): # Issue #19675: 
During pool creation, if we can't create a process, # don't leak already created ones. will_fail_in = 3 forked_processes = [] class FailingForkProcess: def __init__(self, **kwargs): self.name = 'Fake Process' self.exitcode = None self.state = None forked_processes.append(self) def start(self): nonlocal will_fail_in if will_fail_in <= 0: raise OSError("Manually induced OSError") will_fail_in -= 1 self.state = 'started' def terminate(self): self.state = 'stopping' def join(self): if self.state == 'stopping': self.state = 'stopped' def is_alive(self): return self.state == 'started' or self.state == 'stopping' with self.assertRaisesRegex(OSError, 'Manually induced OSError'): p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock( Process=FailingForkProcess)) p.close() p.join() self.assertFalse( any(process.is_alive() for process in forked_processes)) @hashlib_helper.requires_hashdigest('md5') class TestSyncManagerTypes(unittest.TestCase): """Test all the types which can be shared between a parent and a child process by using a manager which acts as an intermediary between them. In the following unit-tests the base type is created in the parent process, the @classmethod represents the worker process and the shared object is readable and editable between the two. # The child. @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.append(6) # The parent. def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert o[1] == 6 """ manager_class = multiprocessing.managers.SyncManager def setUp(self): self.manager = self.manager_class() self.manager.start() self.proc = None def tearDown(self): if self.proc is not None and self.proc.is_alive(): self.proc.terminate() self.proc.join() self.manager.shutdown() self.manager = None self.proc = None @classmethod def setUpClass(cls): support.reap_children() tearDownClass = setUpClass def wait_proc_exit(self): # Only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395). 
join_process(self.proc) start_time = getattr(time,'monotonic',time.time)() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = getattr(time,'monotonic',time.time)() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break def run_worker(self, worker, obj): self.proc = multiprocessing.Process(target=worker, args=(obj, )) self.proc.daemon = True self.proc.start() self.wait_proc_exit() self.assertEqual(self.proc.exitcode, 0) @classmethod def _test_event(cls, obj): assert obj.is_set() obj.wait() obj.clear() obj.wait(0.001) def test_event(self): o = self.manager.Event() o.set() self.run_worker(self._test_event, o) assert not o.is_set() o.wait(0.001) @classmethod def _test_lock(cls, obj): obj.acquire() def test_lock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_lock, o) o.release() self.assertRaises(RuntimeError, o.release) # already released @classmethod def _test_rlock(cls, obj): obj.acquire() obj.release() def test_rlock(self, lname="Lock"): o = getattr(self.manager, lname)() self.run_worker(self._test_rlock, o) @classmethod def _test_semaphore(cls, obj): obj.acquire() def test_semaphore(self, sname="Semaphore"): o = getattr(self.manager, sname)() self.run_worker(self._test_semaphore, o) o.release() def test_bounded_semaphore(self): self.test_semaphore(sname="BoundedSemaphore") @classmethod def _test_condition(cls, obj): obj.acquire() obj.release() def test_condition(self): o = self.manager.Condition() self.run_worker(self._test_condition, o) @classmethod def _test_barrier(cls, obj): assert obj.parties == 5 obj.reset() def test_barrier(self): o = self.manager.Barrier(5) self.run_worker(self._test_barrier, o) @classmethod def _test_pool(cls, obj): # TODO: fix https://bugs.python.org/issue35919 with obj: pass def test_pool(self): o = self.manager.Pool(processes=4) self.run_worker(self._test_pool, o) @classmethod def _test_queue(cls, obj): assert obj.qsize() == 2 assert obj.full() assert not obj.empty() assert obj.get() == 5 assert not obj.empty() assert obj.get() == 6 assert obj.empty() def test_queue(self, qname="Queue"): o = getattr(self.manager, qname)(2) o.put(5) o.put(6) self.run_worker(self._test_queue, o) assert o.empty() assert not o.full() def test_joinable_queue(self): self.test_queue("JoinableQueue") @classmethod def _test_list(cls, obj): assert obj[0] == 5 assert obj.count(5) == 1 assert obj.index(5) == 0 obj.sort() obj.reverse() for x in obj: pass assert len(obj) == 1 assert obj.pop(0) == 5 def test_list(self): o = self.manager.list() o.append(5) self.run_worker(self._test_list, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_dict(cls, obj): assert len(obj) == 1 assert obj['foo'] == 5 assert obj.get('foo') == 5 assert list(obj.items()) == [('foo', 5)] assert list(obj.keys()) == ['foo'] assert list(obj.values()) == [5] assert obj.copy() == {'foo': 5} assert obj.popitem() == ('foo', 5) def test_dict(self): o = self.manager.dict() o['foo'] = 5 self.run_worker(self._test_dict, o) assert not o self.assertEqual(len(o), 0) @classmethod def _test_value(cls, obj): assert obj.value == 1 assert obj.get() == 1 obj.set(2) def test_value(self): o = self.manager.Value('i', 1) self.run_worker(self._test_value, o) self.assertEqual(o.value, 2) self.assertEqual(o.get(), 2) @classmethod def _test_array(cls, obj): assert obj[0] == 0 assert obj[1] == 1 assert 
len(obj) == 2 assert list(obj) == [0, 1] def test_array(self): o = self.manager.Array('i', [0, 1]) self.run_worker(self._test_array, o) @classmethod def _test_namespace(cls, obj): assert obj.x == 0 assert obj.y == 1 def test_namespace(self): o = self.manager.Namespace() o.x = 0 o.y = 1 self.run_worker(self._test_namespace, o) class MiscTestCase(unittest.TestCase): def test__all__(self): # Just make sure names in blacklist are excluded support.check__all__(self, multiprocessing, extra=multiprocessing.__all__, blacklist=['SUBDEBUG', 'SUBWARNING', 'license', 'citation']) # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocessing.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() processes = set(multiprocessing.process._dangling) - set(cls.dangling[0]) if processes: test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocessing.Process connection = multiprocessing.connection current_process = staticmethod(multiprocessing.current_process) parent_process = staticmethod(multiprocessing.parent_process) active_children = staticmethod(multiprocessing.active_children) Pool = staticmethod(multiprocessing.Pool) Pipe = staticmethod(multiprocessing.Pipe) Queue = staticmethod(multiprocessing.Queue) JoinableQueue = staticmethod(multiprocessing.JoinableQueue) Lock = staticmethod(multiprocessing.Lock) RLock = staticmethod(multiprocessing.RLock) Semaphore = staticmethod(multiprocessing.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) Condition = staticmethod(multiprocessing.Condition) Event = staticmethod(multiprocessing.Event) Barrier = staticmethod(multiprocessing.Barrier) Value = staticmethod(multiprocessing.Value) Array = staticmethod(multiprocessing.Array) RawValue = staticmethod(multiprocessing.RawValue) RawArray = staticmethod(multiprocessing.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocessing.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocessing.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this 
can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) start_time = getattr(time,'monotonic',time.time)() t = 0.01 while len(multiprocessing.active_children()) > 1: time.sleep(t) t *= 2 dt = getattr(time,'monotonic',time.time)() - start_time if dt >= 5.0: test.support.environment_altered = True support.print_warning(f"multiprocess.Manager still has " f"{multiprocessing.active_children()} " f"active children after {dt} seconds") break gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. test.support.environment_altered = True support.print_warning('Shared objects which still exist ' 'at manager shutdown:') support.print_warning(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocessing.dummy.Process connection = multiprocessing.dummy.connection current_process = staticmethod(multiprocessing.dummy.current_process) active_children = staticmethod(multiprocessing.dummy.active_children) Pool = staticmethod(multiprocessing.dummy.Pool) Pipe = staticmethod(multiprocessing.dummy.Pipe) Queue = staticmethod(multiprocessing.dummy.Queue) JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) Lock = staticmethod(multiprocessing.dummy.Lock) RLock = staticmethod(multiprocessing.dummy.RLock) Semaphore = staticmethod(multiprocessing.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) Condition = staticmethod(multiprocessing.dummy.Condition) Event = staticmethod(multiprocessing.dummy.Event) Barrier = staticmethod(multiprocessing.dummy.Barrier) Value = staticmethod(multiprocessing.dummy.Value) Array = staticmethod(multiprocessing.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass if type_ == 'manager': Temp = hashlib_helper.requires_hashdigest('md5')(Temp) Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = __module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocessing.set_forkserver_preload(PRELOAD) multiprocessing.process._cleanup() dangling[0] = multiprocessing.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocessing.get_start_method(allow_none=True) try: multiprocessing.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " 
"see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates temp directory multiprocessing.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = False # bpo-26762: Some multiprocessing objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. test.support.gc_collect() multiprocessing.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocessing.process._dangling) - set(dangling[0]) if processes: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling processes: {processes}') processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True test.support.environment_altered = True support.print_warning(f'Dangling threads: {threads}') threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocessing.util._cleanup_tests() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/tests/__main__.py000066400000000000000000000016201455552142400265370ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') tests = glob.glob(suite + os.path.sep + '__init__.py') + \ [i for i in tests if 'main' not in i] if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: failed = 1 print('') exit(failed) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/tests/mp_fork_bomb.py000066400000000000000000000007001455552142400274510ustar00rootroot00000000000000import multiprocessing, sys def foo(): print("123") # Because "if __name__ == '__main__'" is missing this will not work # correctly on Windows. However, we should get a RuntimeError rather # than the Windows equivalent of a fork bomb. 
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) else: multiprocessing.set_start_method('spawn') p = multiprocessing.Process(target=foo) p.start() p.join() sys.exit(p.exitcode) uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/tests/mp_preload.py000066400000000000000000000005551455552142400271470ustar00rootroot00000000000000import multiprocessing multiprocessing.Lock() def f(): print("ok") if __name__ == "__main__": ctx = multiprocessing.get_context("forkserver") modname = "multiprocess.tests.mp_preload" # Make sure it's importable __import__(modname) ctx.set_forkserver_preload([modname]) proc = ctx.Process(target=f) proc.start() proc.join() uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/tests/test_multiprocessing_fork.py000066400000000000000000000007341455552142400323330ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("fork is not available on Windows") if sys.platform == 'darwin': raise unittest.SkipTest("test may crash on macOS (bpo-33725)") install_tests_in_module_dict(globals(), 'fork') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/tests/test_multiprocessing_forkserver.py000066400000000000000000000006071455552142400335610ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict import sys from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") if sys.platform == "win32": raise unittest.SkipTest("forkserver is not available on Windows") install_tests_in_module_dict(globals(), 'forkserver') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/tests/test_multiprocessing_main_handling.py000066400000000000000000000271601455552142400341640ustar00rootroot00000000000000# tests __main__ module handling in multiprocessing from test import support # Skip tests if _multiprocessing wasn't built. support.import_module('_multiprocessing') import importlib import importlib.machinery import unittest import sys import os import os.path import py_compile from test.support.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, assert_python_ok) if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") # Look up which start methods are available to test import multiprocess as multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) # Issue #22332: Skip tests if sem_open implementation is broken. support.import_module('multiprocess.synchronize') verbose = support.verbose test_source = """\ # multiprocessing includes all sorts of shenanigans to make __main__ # attributes accessible in the subprocess in a pickle compatible way. 
# We run the "doesn't work in the interactive interpreter" example from # the docs to make sure it *does* work from an executed __main__, # regardless of the invocation mechanism import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method # We use this __main__ defined function in the map call below in order to # check that multiprocessing in correctly running the unguarded # code in child processes and then making it available as __main__ def f(x): return x*x # Check explicit relative imports if "check_sibling" in __file__: # We're inside a package and not in a __main__.py file # so make sure explicit relative imports work correctly from . import sibling if __name__ == '__main__': start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(f, [1, 2, 3], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) test_source_main_skipped_in_children = """\ # __main__.py files have an implied "if __name__ == '__main__'" so # multiprocessing should always skip running them in child processes # This means we can't use __main__ defined functions in child processes, # so we just use "int" as a passthrough operation below if __name__ != "__main__": raise RuntimeError("Should only be called as __main__!") import sys import time sys.path.extend({0}) from multiprocess import Pool, set_start_method start_method = sys.argv[1] set_start_method(start_method) results = [] with Pool(5) as pool: pool.map_async(int, [1, 4, 9], callback=results.extend) start_time = getattr(time,'monotonic',time.time)() while not results: time.sleep(0.05) # up to 1 min to report the results dt = getattr(time,'monotonic',time.time)() - start_time if dt > 60.0: raise RuntimeError("Timed out waiting for results (%.1f sec)" % dt) results.sort() print(start_method, "->", results) pool.join() """.format(sys.path) # These helpers were copied from test_cmd_line_script & tweaked a bit... def _make_test_script(script_dir, script_basename, source=test_source, omit_suffix=False): to_return = make_script(script_dir, script_basename, source, omit_suffix) # Hack to check explicit relative imports if script_basename == "check_sibling": make_script(script_dir, "sibling", "") importlib.invalidate_caches() return to_return def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source=test_source, depth=1): to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth) importlib.invalidate_caches() return to_return # There's no easy way to pass the script directory in to get # -m to work (avoiding that is the whole point of making # directories and zipfiles executable!) 
# So we fake it for testing purposes with a custom launch script launch_source = """\ import sys, os.path, runpy sys.path.insert(0, %s) runpy._run_module_as_main(%r) """ def _make_launch_script(script_dir, script_basename, module_name, path=None): if path is None: path = "os.path.dirname(__file__)" else: path = repr(path) source = launch_source % (path, module_name) to_return = make_script(script_dir, script_basename, source) importlib.invalidate_caches() return to_return class MultiProcessingCmdLineMixin(): maxDiff = None # Show full tracebacks on subprocess failure def setUp(self): if self.start_method not in AVAILABLE_START_METHODS: self.skipTest("%r start method not available" % self.start_method) def _check_output(self, script_name, exit_code, out, err): if verbose > 1: print("Output from test script %r:" % script_name) print(repr(out)) self.assertEqual(exit_code, 0) self.assertEqual(err.decode('utf-8'), '') expected_results = "%s -> [1, 4, 9]" % self.start_method self.assertEqual(out.decode('utf-8').strip(), expected_results) def _check_script(self, script_name, *cmd_line_switches): if not __debug__: cmd_line_switches += ('-' + 'O' * sys.flags.optimize,) run_args = cmd_line_switches + (script_name, self.start_method) rc, out, err = assert_python_ok(*run_args, __isolated=False) self._check_output(script_name, rc, out, err) def test_basic_script(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') self._check_script(script_name) def test_basic_script_no_suffix(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script', omit_suffix=True) self._check_script(script_name) def test_ipython_workaround(self): # Some versions of the IPython launch script are missing the # __name__ = "__main__" guard, and multiprocessing has long had # a workaround for that case # See https://github.com/ipython/ipython/issues/4698 source = test_source_main_skipped_in_children with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'ipython', source=source) self._check_script(script_name) script_no_suffix = _make_test_script(script_dir, 'ipython', source=source, omit_suffix=True) self._check_script(script_no_suffix) def test_script_compiled(self): with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, 'script') py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) self._check_script(pyc_file) def test_directory(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) self._check_script(script_dir) def test_directory_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) self._check_script(script_dir) def test_zipfile(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name) self._check_script(zip_name) def test_zipfile_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: script_name = _make_test_script(script_dir, '__main__', source=source) compiled_name = 
py_compile.compile(script_name, doraise=True) zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name) self._check_script(zip_name) def test_module_in_package(self): with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, 'check_sibling') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.check_sibling') self._check_script(launch_name) def test_module_in_package_in_zipfile(self): with support.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script') launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name) self._check_script(launch_name) def test_module_in_subpackage_in_zipfile(self): with support.temp_dir() as script_dir: zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name) self._check_script(launch_name) def test_package(self): source = self.main_in_children_source with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) def test_package_compiled(self): source = self.main_in_children_source with support.temp_dir() as script_dir: pkg_dir = os.path.join(script_dir, 'test_pkg') make_pkg(pkg_dir) script_name = _make_test_script(pkg_dir, '__main__', source=source) compiled_name = py_compile.compile(script_name, doraise=True) os.remove(script_name) pyc_file = support.make_legacy_pyc(script_name) launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_script(launch_name) # Test all supported start methods (setupClass skips as appropriate) class SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'spawn' main_in_children_source = test_source_main_skipped_in_children class ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'fork' main_in_children_source = test_source class ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase): start_method = 'forkserver' main_in_children_source = test_source_main_skipped_in_children def tearDownModule(): support.reap_children() if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/tests/test_multiprocessing_spawn.py000066400000000000000000000004241455552142400325160ustar00rootroot00000000000000import unittest from multiprocess.tests import install_tests_in_module_dict from test import support if support.PGO: raise unittest.SkipTest("test is not helpful for PGO") install_tests_in_module_dict(globals(), 'spawn') if __name__ == '__main__': unittest.main() uqfoundation-multiprocess-b3457a5/pypy3.9/multiprocess/util.py000066400000000000000000000332521455552142400246400ustar00rootroot00000000000000# # Module providing various facilities to other parts of the package # # multiprocessing/util.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import os import itertools import sys import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 LOGGER_NAME = 'multiprocess' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args): if _logger: _logger.log(SUBDEBUG, msg, *args) def debug(msg, *args): if _logger: _logger.log(DEBUG, msg, *args) def info(msg, *args): if _logger: _logger.log(INFO, msg, *args) def sub_warning(msg, *args): if _logger: _logger.log(SUBWARNING, msg, *args) def get_logger(): ''' Returns logger used by multiprocess ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger # Abstract socket support def _platform_supports_abstract_sockets(): if sys.platform == "linux": return True if hasattr(sys, 'getandroidapilevel'): return True return False def is_abstract_socket_namespace(address): if not address: return False if isinstance(address, bytes): return address[0] == 0 elif isinstance(address, str): return address[0] == "\0" raise TypeError(f'address type of {address!r} unrecognized') abstract_sockets_supported = _platform_supports_abstract_sockets() # # Function returning a temp directory which will be removed on exit # def _remove_temp_dir(rmtree, tempdir): rmtree(tempdir) current_process = process.current_process() # current_process() can be None if the finalizer is called # late during Python finalization if current_process is not None: current_process._config['tempdir'] = None def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up tempdir = process.current_process()._config.get('tempdir') if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) # keep a strong reference to shutil.rmtree(), since the finalizer # can be called late during Python shutdown Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), exitpriority=-100) process.current_process()._config['tempdir'] = tempdir return tempdir # # Support for reinitialization of objects when bootstrapping a child process # _afterfork_registry = weakref.WeakValueDictionary() _afterfork_counter = itertools.count() def _run_after_forkers(): items = list(_afterfork_registry.items()) items.sort() for (index, ident, func), obj in items: try: func(obj) except Exception as e: info('after forker raised exception %s', e) def register_after_fork(obj, func): _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj # # Finalization using weakrefs # _finalizer_registry = {} 
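# Finalizers are keyed by (exitpriority, creation counter): higher priorities run first, and finalizers with equal priority run in reverse order of creation.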
_finalizer_counter = itertools.count() class Finalize(object): ''' Class which supports object finalization using weakrefs ''' def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): if (exitpriority is not None) and not isinstance(exitpriority,int): raise TypeError( "Exitpriority ({0!r}) must be None or int, not {1!s}".format( exitpriority, type(exitpriority))) if obj is not None: self._weakref = weakref.ref(obj, self) elif exitpriority is None: raise ValueError("Without object, exitpriority cannot be None") self._callback = callback self._args = args self._kwargs = kwargs or {} self._key = (exitpriority, next(_finalizer_counter)) self._pid = os.getpid() _finalizer_registry[self._key] = self def __call__(self, wr=None, # Need to bind these locally because the globals can have # been cleared at shutdown _finalizer_registry=_finalizer_registry, sub_debug=sub_debug, getpid=os.getpid): ''' Run the callback unless it has already been called or cancelled ''' try: del _finalizer_registry[self._key] except KeyError: sub_debug('finalizer no longer registered') else: if self._pid != getpid(): sub_debug('finalizer ignored because different process') res = None else: sub_debug('finalizer calling %s with args %s and kwargs %s', self._callback, self._args, self._kwargs) res = self._callback(*self._args, **self._kwargs) self._weakref = self._callback = self._args = \ self._kwargs = self._key = None return res def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None def still_active(self): ''' Return whether this finalizer is still waiting to invoke callback ''' return self._key in _finalizer_registry def __repr__(self): try: obj = self._weakref() except (AttributeError, TypeError): obj = None if obj is None: return '<%s object, dead>' % self.__class__.__name__ x = '<%s object, callback=%s' % ( self.__class__.__name__, getattr(self._callback, '__name__', self._callback)) if self._args: x += ', args=' + str(self._args) if self._kwargs: x += ', kwargs=' + str(self._kwargs) if self._key[0] is not None: x += ', exitpriority=' + str(self._key[0]) return x + '>' def _run_finalizers(minpriority=None): ''' Run all finalizers whose exit priority is not None and at least minpriority Finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation. ''' if _finalizer_registry is None: # This function may be called after this module's globals are # destroyed. See the _exit_function function in this module for more # notes. return if minpriority is None: f = lambda p : p[0] is not None else: f = lambda p : p[0] is not None and p[0] >= minpriority # Careful: _finalizer_registry may be mutated while this function # is running (either by a GC run or by another thread). # list(_finalizer_registry) should be atomic, while # list(_finalizer_registry.items()) is not. 
keys = [key for key in list(_finalizer_registry) if f(key)] keys.sort(reverse=True) for key in keys: finalizer = _finalizer_registry.get(key) # key may have been removed from the registry if finalizer is not None: sub_debug('calling %s', finalizer) try: finalizer() except Exception: import traceback traceback.print_exc() if minpriority is None: _finalizer_registry.clear() # # Clean up on exit # def is_exiting(): ''' Returns true if the process is shutting down ''' return _exiting or _exiting is None _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, active_children=process.active_children, current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. global _exiting if not _exiting: _exiting = True info('process shutting down') debug('running all "atexit" finalizers with priority >= 0') _run_finalizers(0) if current_process() is not None: # We check if the current process is None here because if # it's None, any call to ``active_children()`` will raise # an AttributeError (active_children winds up trying to # get attributes from util._current_process). One # situation where this can happen is if someone has # manipulated sys.modules, causing this module to be # garbage collected. The destructor for the module type # then replaces all values in the module dict with None. # For instance, after setuptools runs a test it replaces # sys.modules with a copy created earlier. See issues # #9775 and #15881. Also related: #4106, #9205, and # #9207. for p in active_children(): if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() for p in active_children(): info('calling join() for process %s', p.name) p.join() debug('running the remaining "atexit" finalizers') _run_finalizers() atexit.register(_exit_function) # # Some fork aware types # class ForkAwareThreadLock(object): def __init__(self): self._lock = threading.Lock() self.acquire = self._lock.acquire self.release = self._lock.release register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) def _at_fork_reinit(self): self._lock._at_fork_reinit() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) class ForkAwareLocal(threading.local): def __init__(self): register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () # # Close fds except those specified # try: MAXFD = os.sysconf("SC_OPEN_MAX") except Exception: MAXFD = 256 def close_all_fds_except(fds): fds = list(fds) + [-1, MAXFD] fds.sort() assert fds[-1] == MAXFD, 'fd too large' for i in range(len(fds) - 1): os.closerange(fds[i]+1, fds[i+1]) # # Close sys.stdin and replace stdin with os.devnull # def _close_stdin(): if sys.stdin is None: return try: sys.stdin.close() except (OSError, ValueError): pass try: fd = os.open(os.devnull, os.O_RDONLY) try: sys.stdin = open(fd, closefd=False) except: os.close(fd) raise except (OSError, ValueError): pass # # Flush standard streams, if any # def _flush_std_streams(): try: sys.stdout.flush() except (AttributeError, ValueError): pass try: sys.stderr.flush() except (AttributeError, ValueError): pass # # Start a program with only specified fds kept open # def spawnv_passfds(path, args, passfds): import _posixsubprocess passfds = tuple(sorted(map(int, passfds))) errpipe_read, errpipe_write = os.pipe() try: return 
_posixsubprocess.fork_exec( args, [os.fsencode(path)], True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, None, None, None, -1, None) finally: os.close(errpipe_read) os.close(errpipe_write) def close_fds(*fds): """Close each file descriptor given as an argument""" for fd in fds: os.close(fd) def _cleanup_tests(): """Cleanup multiprocessing resources when multiprocessing tests completed.""" from test import support # cleanup multiprocessing process._cleanup() # Stop the ForkServer process if it's running from multiprocess import forkserver forkserver._forkserver._stop() # Stop the ResourceTracker process if it's running from multiprocess import resource_tracker resource_tracker._resource_tracker._stop() # bpo-37421: Explicitly call _run_finalizers() to remove immediately # temporary directories created by multiprocessing.util.get_temp_dir(). _run_finalizers() support.gc_collect() support.reap_children() uqfoundation-multiprocess-b3457a5/setup.cfg000066400000000000000000000001761455552142400211450ustar00rootroot00000000000000[egg_info] #tag_build = .dev0 [bdist_wheel] #python-tag = py3 #plat-name = manylinux_2_28_x86_64 [sdist] #formats=zip,gztar uqfoundation-multiprocess-b3457a5/setup.py000066400000000000000000000264651455552142400210470ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2008-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE # # original code modified from processing/setup.py # original: Copyright (c) 2006-2008, R Oudkerk # original: Licence 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/COPYING.txt import re import os import sys import glob # drop support for older python if sys.version_info < (3, 8): unsupported = 'Versions of Python before 3.8 are not supported' raise ValueError(unsupported) #is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') # the code is version-specific, so get the appropriate root directory root = 'pypy' if is_pypy else 'py' pymajor,pyminor = sys.version_info[:2] pkgdir = '%s%s.%s' % (root,pymajor,pyminor) pkgname = 'multiprocess' # if sys.version is higher than explicitly supported, try the latest version HERE = os.path.dirname(os.path.abspath(__file__)) while not os.path.exists(os.path.join(HERE,'%s%s.%s' % (root,pymajor,pyminor))): pyminor -= 1 if pyminor < 0: unsupported = 'Python %s is not supported' % pkgdir[len(root):] raise ValueError(unsupported) if '%s%s.%s' % (root,pymajor,pyminor) != pkgdir: msg = 'Warning: Python %s is not currently supported, reverting to %s.%s' print(msg % (pkgdir[len(root):],pymajor,pyminor)) pkgdir = '%s%s.%s' % (root,pymajor,pyminor) srcdir = '%s/Modules/_%s' % (pkgdir, pkgname) # -*- Distribution Meta -*- here = os.path.abspath(os.path.dirname(__file__)) sys.path.append(here) from version import (__version__, __author__, __contact__ as AUTHOR_EMAIL, get_license_text, get_readme_as_rst, write_info_file) LICENSE = get_license_text(os.path.join(here, 'LICENSE')) README = get_readme_as_rst(os.path.join(here, 'README.md')) # write meta info file vers = glob.glob(os.path.join(here,'py3*')) + glob.glob(os.path.join(here,'pypy3*')) for ver in vers: ver = os.path.basename(ver) write_info_file(here, '%s/multiprocess' % ver, doc=README, license=LICENSE, version=__version__, author=__author__) del here, get_license_text, get_readme_as_rst, write_info_file, ver, vers # check if setuptools is available try: from setuptools import setup, Extension, find_packages from setuptools.dist import Distribution has_setuptools = True except ImportError: from distutils.core import setup, Extension # noqa Distribution = object find_packages = lambda **kwds: [pkgname, pkgname+'.dummy', pkgname+'.tests'] has_setuptools = False from distutils import sysconfig from distutils.errors import CCompilerError, DistutilsExecError, \ DistutilsPlatformError ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) if sys.platform == 'win32': # distutils.msvc9compiler can raise IOError if the compiler is missing ext_errors += (IOError, ) BUILD_WARNING = """ ----------------------------------------------------------------------- WARNING: The C extensions could not be compiled ----------------------------------------------------------------------- Maybe you do not have a C compiler installed on this system? The reason was: %s This is just a warning as most of the functionality will work even without the updated C extension. It will simply fallback to the built-in _multiprocessing module. Most notably you will not be able to use FORCE_EXECV on POSIX systems. If this is a problem for you then please install a C compiler or fix the error(s) above. ----------------------------------------------------------------------- """ # # Macros and libraries # # The `macros` dict determines the macros that will be defined when # the C extension is compiled. Each value should be either 0 or 1. # (An undefined macro is assumed to have value 0.) `macros` is only # used on Unix platforms. 
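#
# For example, on a typical Linux build the settings computed below
# reduce to (an illustrative sketch of the values this file derives,
# not extra configuration you need to supply):
#
#   macros    = dict(HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1,
#                    HAVE_FD_TRANSFER=1)
#   libraries = ['rt']
#
# and the C extension is then compiled with
# ``define_macros=list(macros.items())`` and linked against `libraries`.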
# # The `libraries` dict determines the libraries to which the C # extension will be linked. This should probably be either `['rt']` # if you need `librt` or else `[]`. # # Meaning of macros # # HAVE_SEM_OPEN # Set this to 1 if you have `sem_open()`. This enables the use of # posix named semaphores which are necessary for the # implementation of the synchronization primitives on Unix. If # set to 0 then the only way to create synchronization primitives # will be via a manager (e.g. "m = Manager(); lock = m.Lock()"). # # HAVE_SEM_TIMEDWAIT # Set this to 1 if you have `sem_timedwait()`. Otherwise polling # will be necessary when waiting on a semaphore using a timeout. # # HAVE_FD_TRANSFER # Set this to 1 to compile functions for transferring file # descriptors between processes over an AF_UNIX socket using a # control message with type SCM_RIGHTS. On Unix the pickling of # of socket and connection objects depends on this feature. # # If you get errors about missing CMSG_* macros then you should # set this to 0. # # HAVE_BROKEN_SEM_GETVALUE # Set to 1 if `sem_getvalue()` does not work or is unavailable. # On Mac OSX it seems to return -1 with message "[Errno 78] # Function not implemented". # # HAVE_BROKEN_SEM_UNLINK # Set to 1 if `sem_unlink()` is unnecessary. For some reason this # seems to be the case on Cygwin where `sem_unlink()` is missing # from semaphore.h. # if sys.platform == 'win32': # Windows macros = dict() libraries = ['ws2_32'] elif sys.platform.startswith('darwin'): # Mac OSX macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=0, HAVE_FD_TRANSFER=1, HAVE_BROKEN_SEM_GETVALUE=1 ) libraries = [] elif sys.platform.startswith('cygwin'): # Cygwin macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=0, HAVE_BROKEN_SEM_UNLINK=1 ) libraries = [] elif sys.platform in ('freebsd4', 'freebsd5', 'freebsd6'): # FreeBSD's P1003.1b semaphore support is very experimental # and has many known problems. 
(as of June 2008) macros = dict( # FreeBSD 4-6 HAVE_SEM_OPEN=0, HAVE_SEM_TIMEDWAIT=0, HAVE_FD_TRANSFER=1, ) libraries = [] elif re.match('^(gnukfreebsd(8|9|10|11)|freebsd(7|8|9|10))', sys.platform): macros = dict( # FreeBSD 7+ and GNU/kFreeBSD 8+ HAVE_SEM_OPEN=bool( sysconfig.get_config_var('HAVE_SEM_OPEN') and not bool(sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')) ), HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=1, ) libraries = [] elif sys.platform.startswith('openbsd'): macros = dict( # OpenBSD HAVE_SEM_OPEN=0, # Not implemented HAVE_SEM_TIMEDWAIT=0, HAVE_FD_TRANSFER=1, ) libraries = [] else: # Linux and other unices macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=1, ) libraries = ['rt'] if sys.platform == 'win32': multiprocessing_srcs = [ '%s/%s.c' % (srcdir, pkgname), '%s/semaphore.c' % srcdir, ] else: multiprocessing_srcs = [ '%s/%s.c' % (srcdir, pkgname) ] if macros.get('HAVE_SEM_OPEN', False): multiprocessing_srcs.append('%s/semaphore.c' % srcdir) #meta['long_doc'] = open(os.path.join(HERE, 'README.md')).read() # -*- Installation -*- def _is_build_command(argv=sys.argv, cmds=('install', 'build', 'bdist')): for arg in argv: if arg.startswith(cmds): return arg # force python-, abi-, and platform-specific naming of bdist_wheel class BinaryDistribution(Distribution): """Distribution which forces a binary package with platform name""" def has_ext_modules(foo): return True # define dependencies dill_version = 'dill>=0.3.8' def run_setup(with_extensions=True): extensions = [] if with_extensions: extensions = [ Extension( '_%s' % pkgname, sources=multiprocessing_srcs, define_macros=list(macros.items()), libraries=libraries, include_dirs=[srcdir], depends=glob.glob('%s/*.h' % srcdir) + ['setup.py'], ), ] packages = find_packages( where=pkgdir, exclude=['ez_setup', 'examples', 'doc',], ) # build the 'setup' call setup_kwds = dict( name='multiprocess', version=__version__, description=('better multiprocessing and multithreading in Python'), long_description=README.strip(), author=__author__, author_email=AUTHOR_EMAIL, maintainer=__author__, maintainer_email=AUTHOR_EMAIL, license = 'BSD-3-Clause', platforms = ['Linux', 'Windows', 'Mac'], url='https://github.com/uqfoundation/multiprocess', download_url = 'https://pypi.org/project/multiprocess/#files', project_urls = { 'Documentation':'http://multiprocess.rtfd.io', 'Source Code':'https://github.com/uqfoundation/multiprocess', 'Bug Tracker':'https://github.com/uqfoundation/multiprocess/issues', }, python_requires = '>=3.8', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Scientific/Engineering', 'Topic :: Software Development', ], packages=packages, package_dir={'': pkgdir}, ext_modules=extensions, ) # add dependencies depend = [dill_version] extras = {'dill': [dill_version]} # update setup kwds if has_setuptools: setup_kwds.update( zip_safe=False, # distclass=BinaryDistribution, install_requires=depend, # extras_require=extras, ) # call setup setup(**setup_kwds) try: run_setup(False) except BaseException: if 
_is_build_command(sys.argv): #XXX: skip WARNING if is_pypy? import traceback msg = BUILD_WARNING % '\n'.join(traceback.format_stack()) exec('print(msg, file=sys.stderr)') run_setup(False) else: raise # if dependencies are missing, print a warning try: import dill except ImportError: print("\n***********************************************************") print("WARNING: One of the following dependencies is unresolved:") print(" %s" % dill_version) print("***********************************************************\n") if __name__=='__main__': pass # end of file uqfoundation-multiprocess-b3457a5/tox.ini000066400000000000000000000015131455552142400206330ustar00rootroot00000000000000[tox] skipdist=True skip_missing_interpreters= True envlist = py38 py39 py310 py311 py312 py313 pypy38 pypy39 pypy310 [testenv] setenv = # recreate = True deps = # dill whitelist_externals = # bash commands = {envpython} -m pip install . py38: {envpython} py3.8/multiprocess/tests/__main__.py py39: {envpython} py3.9/multiprocess/tests/__main__.py py310: {envpython} py3.10/multiprocess/tests/__main__.py py311: {envpython} py3.11/multiprocess/tests/__main__.py py312: {envpython} py3.12/multiprocess/tests/__main__.py py313: {envpython} py3.13/multiprocess/tests/__main__.py pypy38: {envpython} pypy3.8/multiprocess/tests/__main__.py pypy39: {envpython} pypy3.9/multiprocess/tests/__main__.py pypy310: {envpython} pypy3.10/multiprocess/tests/__main__.py uqfoundation-multiprocess-b3457a5/version.py000066400000000000000000000062651455552142400213700ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE __version__ = '0.70.16'#.dev0' __author__ = 'Mike McKerns' __contact__ = 'mmckerns@uqfoundation.org' def get_license_text(filepath): "open the LICENSE file and read the contents" try: LICENSE = open(filepath).read() except: LICENSE = '' return LICENSE def get_readme_as_rst(filepath): "open the README file and read the markdown as rst" try: fh = open(filepath) name, null = fh.readline().rstrip(), fh.readline() tag, null = fh.readline(), fh.readline() tag = "%s: %s" % (name, tag) split = '-'*(len(tag)-1)+'\n' README = ''.join((null,split,tag,split,'\n')) skip = False for line in fh: if line.startswith('['): continue elif skip and line.startswith(' http'): README += '\n' + line elif line.startswith('* '): README += line.replace('* ',' - ',1) elif line.startswith('-'): README += line.replace('-','=') + '\n' elif line.startswith('!['): # image alt,img = line.split('](',1) if img.startswith('docs'): # relative path img = img.split('docs/source/',1)[-1] # make is in docs README += '.. 
image:: ' + img.replace(')','') README += ' :alt: ' + alt.replace('![','') + '\n' #elif ')[http' in line: # alt text link (`text `_) else: README += line skip = line.endswith(':\n') fh.close() except: README = '' return README def write_info_file(dirpath, modulename, **info): """write the given info to 'modulename/__info__.py' info expects: doc: the module's long_description version: the module's version string author: the module's author string license: the module's license contents """ import os infofile = os.path.join(dirpath, '%s/__info__.py' % modulename) header = '''#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE ''' #XXX: package, author, and email are hardwired in the header doc = info.get('doc', None) version = info.get('version', None) author = info.get('author', None) license = info.get('license', None) with open(infofile, 'w') as fh: fh.write(header) if doc is not None: fh.write("'''%s'''\n\n" % doc) fh.write("__all__ = []\n") # needed for test_import if version is not None: fh.write("__version__ = %r\n" % version) if author is not None: fh.write("__author__ = %r\n\n" % author) if license is not None: fh.write("__license__ = '''\n%s'''\n" % license) return
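
if __name__ == '__main__':
    # Illustrative sketch only (assumes the `multiprocess` package is
    # installed): the Finalize helper defined in multiprocess/util.py
    # registers a cleanup callback that runs when the tracked object is
    # garbage collected, when the finalizer is called explicitly, or at
    # interpreter exit via _exit_function.  The names below (_Handle,
    # _release) are hypothetical demo objects, not part of the package.
    from multiprocess.util import Finalize

    class _Handle(object):
        pass

    def _release(name):
        print('releasing %s' % name)

    handle = _Handle()
    fin = Finalize(handle, _release, args=('demo-handle',), exitpriority=0)
    print(fin.still_active())   # True: callback not yet run
    fin()                       # runs _release('demo-handle') exactly once
    print(fin.still_active())   # False: the finalizer has been consumed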