billiard-3.3.0.15/0000755000076500000000000000000012276217622014144 5ustar asksolwheel00000000000000billiard-3.3.0.15/billiard/0000755000076500000000000000000012276217622015726 5ustar asksolwheel00000000000000billiard-3.3.0.15/billiard/__init__.py0000644000076500000000000002037112276217466020050 0ustar asksolwheel00000000000000"""Python multiprocessing fork with improvements and bugfixes""" # # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Try calling `multiprocessing.doc.main()` to read the html # documentation in a webbrowser. # # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import VERSION = (3, 3, 0, 15) __version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:]) __author__ = 'R Oudkerk / Python Software Foundation' __author_email__ = 'python-dev@python.org' __maintainer__ = 'Ask Solem', __contact__ = "ask@celeryproject.org" __homepage__ = "http://github.com/celery/billiard" __docformat__ = "restructuredtext" # -eof meta- __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger', 'allow_connection_pickling', 'BufferTooShort', 'TimeoutError', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', 'set_executable', 'forking_enable', 'forking_is_enabled' ] # # Imports # import os import sys import warnings from .exceptions import ( # noqa ProcessError, BufferTooShort, TimeoutError, AuthenticationError, TimeLimitExceeded, SoftTimeLimitExceeded, WorkerLostError, ) from .process import Process, current_process, active_children from .util import SUBDEBUG, SUBWARNING def ensure_multiprocessing(): from ._ext import ensure_multiprocessing return ensure_multiprocessing() W_NO_EXECV = """\ force_execv is not supported as the billiard C extension \ is not installed\ """ # # Definitions not depending on native semaphores # def Manager(): ''' Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. ''' from .managers import SyncManager m = SyncManager() m.start() return m def Pipe(duplex=True, rnonblock=False, wnonblock=False): ''' Returns two connection object connected by a pipe ''' from billiard.connection import Pipe return Pipe(duplex, rnonblock, wnonblock) def cpu_count(): ''' Returns the number of CPUs in the system ''' if sys.platform == 'win32': try: num = int(os.environ['NUMBER_OF_PROCESSORS']) except (ValueError, KeyError): num = 0 elif 'bsd' in sys.platform or sys.platform == 'darwin': comm = '/sbin/sysctl -n hw.ncpu' if sys.platform == 'darwin': comm = '/usr' + comm try: with os.popen(comm) as p: num = int(p.read()) except ValueError: num = 0 else: try: num = os.sysconf('SC_NPROCESSORS_ONLN') except (ValueError, OSError, AttributeError): num = 0 if num >= 1: return num else: raise NotImplementedError('cannot determine number of cpus') def freeze_support(): ''' Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .forking import freeze_support freeze_support() def get_logger(): ''' Return package logger -- if it does not already exist then it is created ''' from .util import get_logger return get_logger() def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(): ''' Install support for sending connections and sockets between processes ''' from . import reduction # noqa # # Definitions depending on native semaphores # def Lock(): ''' Returns a non-recursive lock object ''' from .synchronize import Lock return Lock() def RLock(): ''' Returns a recursive lock object ''' from .synchronize import RLock return RLock() def Condition(lock=None): ''' Returns a condition object ''' from .synchronize import Condition return Condition(lock) def Semaphore(value=1): ''' Returns a semaphore object ''' from .synchronize import Semaphore return Semaphore(value) def BoundedSemaphore(value=1): ''' Returns a bounded semaphore object ''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value) def Event(): ''' Returns an event object ''' from .synchronize import Event return Event() def Queue(maxsize=0): ''' Returns a queue object ''' from .queues import Queue return Queue(maxsize) def JoinableQueue(maxsize=0): ''' Returns a queue object ''' from .queues import JoinableQueue return JoinableQueue(maxsize) def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None, timeout=None, soft_timeout=None, lost_worker_timeout=None, max_restarts=None, max_restart_freq=1, on_process_up=None, on_process_down=None, on_timeout_set=None, on_timeout_cancel=None, threads=True, semaphore=None, putlocks=False, allow_restart=False): ''' Returns a process pool object ''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, timeout, soft_timeout, lost_worker_timeout, max_restarts, max_restart_freq, on_process_up, on_process_down, on_timeout_set, on_timeout_cancel, threads, semaphore, putlocks, allow_restart) def RawValue(typecode_or_type, *args): ''' Returns a shared object ''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(typecode_or_type, size_or_initializer): ''' Returns a shared array ''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(typecode_or_type, *args, **kwds): ''' Returns a synchronized shared object ''' from .sharedctypes import Value return Value(typecode_or_type, *args, **kwds) def Array(typecode_or_type, size_or_initializer, **kwds): ''' Returns a synchronized shared array ''' from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, **kwds) # # # def set_executable(executable): ''' Sets the path to a python.exe or pythonw.exe binary used to run child processes on Windows instead of sys.executable. Useful for people embedding Python. ''' from .forking import set_executable set_executable(executable) def forking_is_enabled(): ''' Returns a boolean value indicating whether billiard is currently set to create child processes by forking the current python process rather than by starting a new instances of python. On Windows this always returns `False`. On Unix it returns `True` by default. ''' from . import forking return forking._forking_is_enabled def forking_enable(value): ''' Enable/disable creation of child process by forking the current process. `value` should be a boolean value. If `value` is true then forking is enabled. If `value` is false then forking is disabled. On systems with `os.fork()` forking is enabled by default, and on other systems it is always disabled. ''' if not value: from ._ext import supports_exec if supports_exec: from . import forking if value and not hasattr(os, 'fork'): raise ValueError('os.fork() not found') forking._forking_is_enabled = bool(value) if not value: os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1" else: warnings.warn(RuntimeWarning(W_NO_EXECV)) if os.environ.get("MULTIPROCESSING_FORKING_DISABLE"): forking_enable(False) billiard-3.3.0.15/billiard/_ext.py0000644000076500000000000000214112270022117017220 0ustar asksolwheel00000000000000from __future__ import absolute_import import sys supports_exec = True from .compat import _winapi as win32 # noqa if sys.platform.startswith("java"): _billiard = None else: try: import _billiard # noqa except ImportError: import _multiprocessing as _billiard # noqa supports_exec = False try: Connection = _billiard.Connection except AttributeError: # Py3 from billiard.connection import Connection # noqa PipeConnection = getattr(_billiard, "PipeConnection", None) def ensure_multiprocessing(): if _billiard is None: raise NotImplementedError("multiprocessing not supported") def ensure_SemLock(): try: from _billiard import SemLock # noqa except ImportError: try: from _multiprocessing import SemLock # noqa except ImportError: raise ImportError("""\ This platform lacks a functioning sem_open implementation, therefore, the required synchronization primitives needed will not function, see issue 3770.""") billiard-3.3.0.15/billiard/_win.py0000644000076500000000000000576612270022117017235 0ustar asksolwheel00000000000000# -*- coding: utf-8 -*- """ billiard._win ~~~~~~~~~~~~~ Windows utilities to terminate process groups. """ from __future__ import absolute_import import os # psutil is painfully slow in win32. So to avoid adding big # dependencies like pywin32 a ctypes based solution is preferred # Code based on the winappdbg project http://winappdbg.sourceforge.net/ # (BSD License) from ctypes import ( byref, sizeof, windll, Structure, WinError, POINTER, c_size_t, c_char, c_void_p, ) from ctypes.wintypes import DWORD, LONG ERROR_NO_MORE_FILES = 18 INVALID_HANDLE_VALUE = c_void_p(-1).value class PROCESSENTRY32(Structure): _fields_ = [ ('dwSize', DWORD), ('cntUsage', DWORD), ('th32ProcessID', DWORD), ('th32DefaultHeapID', c_size_t), ('th32ModuleID', DWORD), ('cntThreads', DWORD), ('th32ParentProcessID', DWORD), ('pcPriClassBase', LONG), ('dwFlags', DWORD), ('szExeFile', c_char * 260), ] LPPROCESSENTRY32 = POINTER(PROCESSENTRY32) def CreateToolhelp32Snapshot(dwFlags=2, th32ProcessID=0): hSnapshot = windll.kernel32.CreateToolhelp32Snapshot(dwFlags, th32ProcessID) if hSnapshot == INVALID_HANDLE_VALUE: raise WinError() return hSnapshot def Process32First(hSnapshot, pe=None): return _Process32n(windll.kernel32.Process32First, hSnapshot, pe) def Process32Next(hSnapshot, pe=None): return _Process32n(windll.kernel32.Process32Next, hSnapshot, pe) def _Process32n(fun, hSnapshot, pe=None): if pe is None: pe = PROCESSENTRY32() pe.dwSize = sizeof(PROCESSENTRY32) success = fun(hSnapshot, byref(pe)) if not success: if windll.kernel32.GetLastError() == ERROR_NO_MORE_FILES: return raise WinError() return pe def get_all_processes_pids(): """Return a dictionary with all processes pids as keys and their parents as value. Ignore processes with no parents. """ h = CreateToolhelp32Snapshot() parents = {} pe = Process32First(h) while pe: if pe.th32ParentProcessID: parents[pe.th32ProcessID] = pe.th32ParentProcessID pe = Process32Next(h, pe) return parents def get_processtree_pids(pid, include_parent=True): """Return a list with all the pids of a process tree""" parents = get_all_processes_pids() all_pids = list(parents.keys()) pids = set([pid]) while 1: pids_new = pids.copy() for _pid in all_pids: if parents[_pid] in pids: pids_new.add(_pid) if pids_new == pids: break pids = pids_new.copy() if not include_parent: pids.remove(pid) return list(pids) def kill_processtree(pid, signum): """Kill a process and all its descendants""" family_pids = get_processtree_pids(pid) for _pid in family_pids: os.kill(_pid, signum) billiard-3.3.0.15/billiard/common.py0000644000076500000000000000707312270022117017562 0ustar asksolwheel00000000000000# -*- coding: utf-8 -*- """ This module contains utilities added by billiard, to keep "non-core" functionality out of ``.util``.""" from __future__ import absolute_import import os import signal import sys import pickle as pypickle try: import cPickle as cpickle except ImportError: # pragma: no cover cpickle = None # noqa from .exceptions import RestartFreqExceeded from .five import monotonic if sys.version_info < (2, 6): # pragma: no cover # cPickle does not use absolute_imports pickle = pypickle pickle_load = pypickle.load pickle_loads = pypickle.loads else: pickle = cpickle or pypickle pickle_load = pickle.load pickle_loads = pickle.loads # cPickle.loads does not support buffer() objects, # but we can just create a StringIO and use load. if sys.version_info[0] == 3: from io import BytesIO else: try: from cStringIO import StringIO as BytesIO # noqa except ImportError: from StringIO import StringIO as BytesIO # noqa EX_SOFTWARE = 70 TERMSIGS_DEFAULT = ( 'SIGHUP', 'SIGQUIT', 'SIGTERM', 'SIGUSR1', 'SIGUSR2' ) TERMSIGS_FULL = ( 'SIGHUP', 'SIGQUIT', 'SIGTRAP', 'SIGABRT', 'SIGEMT', 'SIGSYS', 'SIGPIPE', 'SIGALRM', 'SIGTERM', 'SIGXCPU', 'SIGXFSZ', 'SIGVTALRM', 'SIGPROF', 'SIGUSR1', 'SIGUSR2', ) #: set by signal handlers just before calling exit. #: if this is true after the sighandler returns it means that something #: went wrong while terminating the process, and :func:`os._exit` #: must be called ASAP. _should_have_exited = [False] def pickle_loads(s, load=pickle_load): # used to support buffer objects return load(BytesIO(s)) def maybe_setsignal(signum, handler): try: signal.signal(signum, handler) except (OSError, AttributeError, ValueError, RuntimeError): pass def _shutdown_cleanup(signum, frame): # we will exit here so if the signal is received a second time # we can be sure that something is very wrong and we may be in # a crashing loop. if _should_have_exited[0]: os._exit(EX_SOFTWARE) maybe_setsignal(signum, signal.SIG_DFL) _should_have_exited[0] = True sys.exit(-(256 - signum)) def reset_signals(handler=_shutdown_cleanup, full=False): for sig in TERMSIGS_FULL if full else TERMSIGS_DEFAULT: try: signum = getattr(signal, sig) except AttributeError: pass else: current = signal.getsignal(signum) if current is not None and current != signal.SIG_IGN: maybe_setsignal(signum, handler) class restart_state(object): RestartFreqExceeded = RestartFreqExceeded def __init__(self, maxR, maxT): self.maxR, self.maxT = maxR, maxT self.R, self.T = 0, None def step(self, now=None): now = monotonic() if now is None else now R = self.R if self.T and now - self.T >= self.maxT: # maxT passed, reset counter and time passed. self.T, self.R = now, 0 elif self.maxR and self.R >= self.maxR: # verify that R has a value as the result handler # resets this when a job is accepted. If a job is accepted # the startup probably went fine (startup restart burst # protection) if self.R: # pragma: no cover self.R = 0 # reset in case someone catches the error raise self.RestartFreqExceeded("%r in %rs" % (R, self.maxT)) # first run sets T if self.T is None: self.T = now self.R += 1 billiard-3.3.0.15/billiard/compat.py0000644000076500000000000000520512270022117017550 0ustar asksolwheel00000000000000from __future__ import absolute_import import errno import os import sys from .five import range if sys.platform == 'win32': try: import _winapi # noqa except ImportError: # pragma: no cover try: from _billiard import win32 as _winapi # noqa except (ImportError, AttributeError): from _multiprocessing import win32 as _winapi # noqa else: _winapi = None # noqa if sys.version_info > (2, 7, 5): buf_t, is_new_buffer = memoryview, True # noqa else: buf_t, is_new_buffer = buffer, False # noqa if hasattr(os, 'write'): __write__ = os.write if is_new_buffer: def send_offset(fd, buf, offset): return __write__(fd, buf[offset:]) else: # Py<2.7.6 def send_offset(fd, buf, offset): # noqa return __write__(fd, buf_t(buf, offset)) else: # non-posix platform def send_offset(fd, buf, offset): # noqa raise NotImplementedError('send_offset') if sys.version_info[0] == 3: bytes = bytes else: _bytes = bytes # the 'bytes' alias in Python2 does not support an encoding argument. class bytes(_bytes): # noqa def __new__(cls, *args): if len(args) > 1: return _bytes(args[0]).encode(*args[1:]) return _bytes(*args) try: closerange = os.closerange except AttributeError: def closerange(fd_low, fd_high): # noqa for fd in reversed(range(fd_low, fd_high)): try: os.close(fd) except OSError as exc: if exc.errno != errno.EBADF: raise def get_errno(exc): """:exc:`socket.error` and :exc:`IOError` first got the ``.errno`` attribute in Py2.7""" try: return exc.errno except AttributeError: try: # e.args = (errno, reason) if isinstance(exc.args, tuple) and len(exc.args) == 2: return exc.args[0] except AttributeError: pass return 0 if sys.platform == 'win32': def setblocking(handle, blocking): raise NotImplementedError('setblocking not implemented on win32') def isblocking(handle): raise NotImplementedError('isblocking not implemented on win32') else: from os import O_NONBLOCK from fcntl import fcntl, F_GETFL, F_SETFL def isblocking(handle): # noqa return not (fcntl(handle, F_GETFL) & O_NONBLOCK) def setblocking(handle, blocking): # noqa flags = fcntl(handle, F_GETFL, 0) fcntl( handle, F_SETFL, flags & (~O_NONBLOCK) if blocking else flags | O_NONBLOCK, ) billiard-3.3.0.15/billiard/connection.py0000644000076500000000000000116012271741036020431 0ustar asksolwheel00000000000000from __future__ import absolute_import import sys is_pypy = hasattr(sys, 'pypy_version_info') if sys.version_info[0] == 3: from .py3 import connection else: from .py2 import connection # noqa if is_pypy: import _multiprocessing from .compat import setblocking, send_offset class Connection(_multiprocessing.Connection): def send_offset(self, buf, offset): return send_offset(self.fileno(), buf, offset) def setblocking(self, blocking): setblocking(self.fileno(), blocking) _multiprocessing.Connection = Connection sys.modules[__name__] = connection billiard-3.3.0.15/billiard/dummy/0000755000076500000000000000000012276217622017061 5ustar asksolwheel00000000000000billiard-3.3.0.15/billiard/dummy/__init__.py0000644000076500000000000001077712270022117021171 0ustar asksolwheel00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # from __future__ import absolute_import __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event from billiard.five import Queue from billiard.connection import Pipe class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): assert self._parent is current_process() self._start_called = True self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None try: _Condition = threading._Condition except AttributeError: # Py3 _Condition = threading.Condition # noqa class Condition(_Condition): if sys.version_info[0] == 3: notify_all = _Condition.notifyAll else: notify_all = _Condition.notifyAll.__func__ Process = DummyProcess current_process = threading.currentThread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return 'Namespace(%s)' % str.join(', ', temp) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def _get(self): return self._value def _set(self, value): self._value = value value = property(_get, _set) def __repr__(self): return '<%r(%r, %r)>' % (type(self).__name__, self._typecode, self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from billiard.pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue billiard-3.3.0.15/billiard/dummy/connection.py0000644000076500000000000000561312270022117021562 0ustar asksolwheel00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # from __future__ import absolute_import __all__ = ['Client', 'Listener', 'Pipe'] from billiard.five import Queue families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None address = property(lambda self: self._backlog_queue) def __enter__(self): return self def __exit__(self, *exc_info): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False self._in.not_empty.acquire() self._in.not_empty.wait(timeout) self._in.not_empty.release() return self._in.qsize() > 0 def close(self): pass billiard-3.3.0.15/billiard/einfo.py0000644000076500000000000000550212270022117017365 0ustar asksolwheel00000000000000from __future__ import absolute_import import sys import traceback class _Code(object): def __init__(self, code): self.co_filename = code.co_filename self.co_name = code.co_name class _Frame(object): Code = _Code def __init__(self, frame): self.f_globals = { "__file__": frame.f_globals.get("__file__", "__main__"), "__name__": frame.f_globals.get("__name__"), "__loader__": None, } self.f_locals = fl = {} try: fl["__traceback_hide__"] = frame.f_locals["__traceback_hide__"] except KeyError: pass self.f_code = self.Code(frame.f_code) self.f_lineno = frame.f_lineno class _Object(object): def __init__(self, **kw): [setattr(self, k, v) for k, v in kw.items()] class _Truncated(object): def __init__(self): self.tb_lineno = -1 self.tb_frame = _Object( f_globals={"__file__": "", "__name__": "", "__loader__": None}, f_fileno=None, f_code=_Object(co_filename="...", co_name="[rest of traceback truncated]"), ) self.tb_next = None class Traceback(object): Frame = _Frame tb_frame = tb_lineno = tb_next = None max_frames = sys.getrecursionlimit() // 8 def __init__(self, tb, max_frames=None, depth=0): limit = self.max_frames = max_frames or self.max_frames self.tb_frame = self.Frame(tb.tb_frame) self.tb_lineno = tb.tb_lineno if tb.tb_next is not None: if depth <= limit: self.tb_next = Traceback(tb.tb_next, limit, depth + 1) else: self.tb_next = _Truncated() class ExceptionInfo(object): """Exception wrapping an exception and its traceback. :param exc_info: The exception info tuple as returned by :func:`sys.exc_info`. """ #: Exception type. type = None #: Exception instance. exception = None #: Pickleable traceback instance for use with :mod:`traceback` tb = None #: String representation of the traceback. traceback = None #: Set to true if this is an internal error. internal = False def __init__(self, exc_info=None, internal=False): self.type, self.exception, tb = exc_info or sys.exc_info() try: self.tb = Traceback(tb) self.traceback = ''.join( traceback.format_exception(self.type, self.exception, tb), ) self.internal = internal finally: del(tb) def __str__(self): return self.traceback def __repr__(self): return "" % (self.exception, ) @property def exc_info(self): return self.type, self.exception, self.tb billiard-3.3.0.15/billiard/exceptions.py0000644000076500000000000000242412270022117020446 0ustar asksolwheel00000000000000from __future__ import absolute_import try: from multiprocessing import ( ProcessError, BufferTooShort, TimeoutError, AuthenticationError, ) except ImportError: class ProcessError(Exception): # noqa pass class BufferTooShort(Exception): # noqa pass class TimeoutError(Exception): # noqa pass class AuthenticationError(Exception): # noqa pass class TimeLimitExceeded(Exception): """The time limit has been exceeded and the job has been terminated.""" def __str__(self): return "TimeLimitExceeded%s" % (self.args, ) class SoftTimeLimitExceeded(Exception): """The soft time limit has been exceeded. This exception is raised to give the task a chance to clean up.""" def __str__(self): return "SoftTimeLimitExceeded%s" % (self.args, ) class WorkerLostError(Exception): """The worker processing a job has exited prematurely.""" class Terminated(Exception): """The worker processing a job has been terminated by user request.""" class RestartFreqExceeded(Exception): """Restarts too fast.""" class CoroStop(Exception): """Coroutine exit, as opposed to StopIteration which may mean it should be restarted.""" pass billiard-3.3.0.15/billiard/five.py0000644000076500000000000001234112270022117017215 0ustar asksolwheel00000000000000# -*- coding: utf-8 -*- """ celery.five ~~~~~~~~~~~ Compatibility implementations of features only available in newer Python versions. """ from __future__ import absolute_import ############## py3k ######################################################### import sys PY3 = sys.version_info[0] == 3 try: reload = reload # noqa except NameError: # pragma: no cover from imp import reload # noqa try: from UserList import UserList # noqa except ImportError: # pragma: no cover from collections import UserList # noqa try: from UserDict import UserDict # noqa except ImportError: # pragma: no cover from collections import UserDict # noqa ############## time.monotonic ################################################ if sys.version_info < (3, 3): import platform SYSTEM = platform.system() if SYSTEM == 'Darwin': import ctypes from ctypes.util import find_library libSystem = ctypes.CDLL('libSystem.dylib') CoreServices = ctypes.CDLL(find_library('CoreServices'), use_errno=True) mach_absolute_time = libSystem.mach_absolute_time mach_absolute_time.restype = ctypes.c_uint64 absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds absolute_to_nanoseconds.restype = ctypes.c_uint64 absolute_to_nanoseconds.argtypes = [ctypes.c_uint64] def _monotonic(): return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9 elif SYSTEM == 'Linux': # from stackoverflow: # questions/1205722/how-do-i-get-monotonic-time-durations-in-python import ctypes import os CLOCK_MONOTONIC = 1 # see class timespec(ctypes.Structure): _fields_ = [ ('tv_sec', ctypes.c_long), ('tv_nsec', ctypes.c_long), ] librt = ctypes.CDLL('librt.so.1', use_errno=True) clock_gettime = librt.clock_gettime clock_gettime.argtypes = [ ctypes.c_int, ctypes.POINTER(timespec), ] def _monotonic(): # noqa t = timespec() if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0: errno_ = ctypes.get_errno() raise OSError(errno_, os.strerror(errno_)) return t.tv_sec + t.tv_nsec * 1e-9 else: from time import time as _monotonic try: from time import monotonic except ImportError: monotonic = _monotonic # noqa if PY3: import builtins from queue import Queue, Empty, Full from itertools import zip_longest from io import StringIO, BytesIO map = map string = str string_t = str long_t = int text_t = str range = range int_types = (int, ) open_fqdn = 'builtins.open' def items(d): return d.items() def keys(d): return d.keys() def values(d): return d.values() def nextfun(it): return it.__next__ exec_ = getattr(builtins, 'exec') def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value class WhateverIO(StringIO): def write(self, data): if isinstance(data, bytes): data = data.encode() StringIO.write(self, data) else: import __builtin__ as builtins # noqa from Queue import Queue, Empty, Full # noqa from itertools import imap as map, izip_longest as zip_longest # noqa from StringIO import StringIO # noqa string = unicode # noqa string_t = basestring # noqa text_t = unicode long_t = long # noqa range = xrange int_types = (int, long) open_fqdn = '__builtin__.open' def items(d): # noqa return d.iteritems() def keys(d): # noqa return d.iterkeys() def values(d): # noqa return d.itervalues() def nextfun(it): # noqa return it.next def exec_(code, globs=None, locs=None): """Execute code in a namespace.""" if globs is None: frame = sys._getframe(1) globs = frame.f_globals if locs is None: locs = frame.f_locals del frame elif locs is None: locs = globs exec("""exec code in globs, locs""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") BytesIO = WhateverIO = StringIO # noqa def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): """Class decorator to set metaclass. Works with both Python 3 and Python 3 and it does not add an extra class in the lookup order like ``six.with_metaclass`` does (that is -- it copies the original class instead of using inheritance). """ def _clone_with_metaclass(Class): attrs = dict((key, value) for key, value in items(vars(Class)) if key not in skip_attrs) return Type(Class.__name__, Class.__bases__, attrs) return _clone_with_metaclass billiard-3.3.0.15/billiard/forking.py0000644000076500000000000004215312270022117017727 0ustar asksolwheel00000000000000# # Module for starting a process object using os.fork() or CreateProcess() # # multiprocessing/forking.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import os import sys import signal import warnings from pickle import load, HIGHEST_PROTOCOL from billiard import util from billiard import process from billiard.five import int_types from .reduction import dump from .compat import _winapi as win32 __all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close'] try: WindowsError = WindowsError # noqa except NameError: class WindowsError(Exception): # noqa pass W_OLD_DJANGO_LAYOUT = """\ Will add directory %r to path! This is necessary to accommodate \ pre-Django 1.4 layouts using setup_environ. You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \ environment variable. """ # # Choose whether to do a fork or spawn (fork+exec) on Unix. # This affects how some shared resources should be created. # _forking_is_enabled = sys.platform != 'win32' # # Check that the current thread is spawning a child process # def assert_spawning(self): if not Popen.thread_is_spawning(): raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(self).__name__ ) # # Unix # if sys.platform != 'win32': try: import thread except ImportError: import _thread as thread # noqa import select WINEXE = False WINSERVICE = False exit = os._exit duplicate = os.dup close = os.close _select = util._eintr_retry(select.select) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): _tls = thread._local() def __init__(self, process_obj): # register reducers from billiard import connection # noqa _Django_old_layout_hack__save() sys.stdout.flush() sys.stderr.flush() self.returncode = None r, w = os.pipe() self.sentinel = r if _forking_is_enabled: self.pid = os.fork() if self.pid == 0: os.close(r) if 'random' in sys.modules: import random random.seed() code = process_obj._bootstrap() os._exit(code) else: from_parent_fd, to_child_fd = os.pipe() cmd = get_command_line() + [str(from_parent_fd)] self.pid = os.fork() if self.pid == 0: os.close(r) os.close(to_child_fd) os.execv(sys.executable, cmd) # send information to child prep_data = get_preparation_data(process_obj._name) os.close(from_parent_fd) to_child = os.fdopen(to_child_fd, 'wb') Popen._tls.process_handle = self.pid try: dump(prep_data, to_child, HIGHEST_PROTOCOL) dump(process_obj, to_child, HIGHEST_PROTOCOL) finally: del(Popen._tls.process_handle) to_child.close() # `w` will be closed when the child exits, at which point `r` # will become ready for reading (using e.g. select()). os.close(w) util.Finalize(self, os.close, (r,)) def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except os.error: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) else: assert os.WIFEXITED(sts) self.returncode = os.WEXITSTATUS(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: r = _select([self.sentinel], [], [], timeout)[0] if not r: return None # This shouldn't block if select() returned successfully. return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def terminate(self): if self.returncode is None: try: os.kill(self.pid, signal.SIGTERM) except OSError: if self.wait(timeout=0.1) is None: raise @staticmethod def thread_is_spawning(): if _forking_is_enabled: return False else: return getattr(Popen._tls, 'process_handle', None) is not None @staticmethod def duplicate_for_child(handle): return handle # # Windows # else: try: import thread except ImportError: import _thread as thread # noqa import msvcrt try: import _subprocess except ImportError: import _winapi as _subprocess # noqa # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") exit = win32.ExitProcess close = win32.CloseHandle # # # def duplicate(handle, target_process=None, inheritable=False): if target_process is None: target_process = _subprocess.GetCurrentProcess() h = _subprocess.DuplicateHandle( _subprocess.GetCurrentProcess(), handle, target_process, 0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS ) if sys.version_info[0] < 3 or ( sys.version_info[0] == 3 and sys.version_info[1] < 3): h = h.Detach() return h # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' _tls = thread._local() def __init__(self, process_obj): _Django_old_layout_hack__save() # create pipe for communication with child rfd, wfd = os.pipe() # get handle for read end of the pipe and make it inheritable rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True) os.close(rfd) # start process cmd = get_command_line() + [rhandle] cmd = ' '.join('"%s"' % x for x in cmd) hp, ht, pid, tid = _subprocess.CreateProcess( _python_exe, cmd, None, None, 1, 0, None, None, None ) close(ht) if isinstance(ht, int_types) else ht.Close() (close(rhandle) if isinstance(rhandle, int_types) else rhandle.Close()) # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) # send information to child prep_data = get_preparation_data(process_obj._name) to_child = os.fdopen(wfd, 'wb') Popen._tls.process_handle = int(hp) try: dump(prep_data, to_child, HIGHEST_PROTOCOL) dump(process_obj, to_child, HIGHEST_PROTOCOL) finally: del Popen._tls.process_handle to_child.close() @staticmethod def thread_is_spawning(): return getattr(Popen._tls, 'process_handle', None) is not None @staticmethod def duplicate_for_child(handle): return duplicate(handle, Popen._tls.process_handle) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _subprocess.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _subprocess.WaitForSingleObject(int(self._handle), msecs) if res == _subprocess.WAIT_OBJECT_0: code = _subprocess.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _subprocess.TerminateProcess(int(self._handle), TERMINATE) except WindowsError: if self.wait(timeout=0.1) is None: raise # # # if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--billiard-fork': assert len(argv) == 3 os.environ["FORKED_BY_MULTIPROCESSING"] = "1" return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): main() sys.exit() def get_command_line(): ''' Returns prefix of command line used for spawning a child process ''' if process.current_process()._identity == () and is_forking(sys.argv): raise RuntimeError(''' Attempt to start a new process before the current process has finished its bootstrapping phase. This probably means that have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce a Windows executable.''') if getattr(sys, 'frozen', False): return [sys.executable, '--billiard-fork'] else: prog = 'from billiard.forking import main; main()' return [_python_exe, '-c', prog, '--billiard-fork'] def _Django_old_layout_hack__save(): if 'DJANGO_PROJECT_DIR' not in os.environ: try: settings_name = os.environ['DJANGO_SETTINGS_MODULE'] except KeyError: return # not using Django. conf_settings = sys.modules.get('django.conf.settings') configured = conf_settings and conf_settings.configured try: project_name, _ = settings_name.split('.', 1) except ValueError: return # not modified by setup_environ project = __import__(project_name) try: project_dir = os.path.normpath(_module_parent_dir(project)) except AttributeError: return # dynamically generated module (no __file__) if configured: warnings.warn(UserWarning( W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir) )) os.environ['DJANGO_PROJECT_DIR'] = project_dir def _Django_old_layout_hack__load(): try: sys.path.append(os.environ['DJANGO_PROJECT_DIR']) except KeyError: pass def _module_parent_dir(mod): dir, filename = os.path.split(_module_dir(mod)) if dir == os.curdir or not dir: dir = os.getcwd() return dir def _module_dir(mod): if '__init__.py' in mod.__file__: return os.path.dirname(mod.__file__) return mod.__file__ def main(): ''' Run code specifed by data received over pipe ''' global _forking_is_enabled _Django_old_layout_hack__load() assert is_forking(sys.argv) _forking_is_enabled = False handle = int(sys.argv[-1]) if sys.platform == 'win32': fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) else: fd = handle from_parent = os.fdopen(fd, 'rb') process.current_process()._inheriting = True preparation_data = load(from_parent) prepare(preparation_data) # Huge hack to make logging before Process.run work. try: os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__ except KeyError: pass except AttributeError: pass loglevel = os.environ.get("_MP_FORK_LOGLEVEL_") logfile = os.environ.get("_MP_FORK_LOGFILE_") or None format = os.environ.get("_MP_FORK_LOGFORMAT_") if loglevel: from billiard import util import logging logger = util.get_logger() logger.setLevel(int(loglevel)) if not logger.handlers: logger._rudimentary_setup = True logfile = logfile or sys.__stderr__ if hasattr(logfile, "write"): handler = logging.StreamHandler(logfile) else: handler = logging.FileHandler(logfile) formatter = logging.Formatter( format or util.DEFAULT_LOGGING_FORMAT, ) handler.setFormatter(formatter) logger.addHandler(handler) self = load(from_parent) process.current_process()._inheriting = False from_parent.close() exitcode = self._bootstrap() exit(exitcode) def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' from billiard.util import _logger, _log_to_stderr d = dict( name=name, sys_path=sys.path, sys_argv=sys.argv, log_to_stderr=_log_to_stderr, orig_dir=process.ORIGINAL_DIR, authkey=process.current_process().authkey, ) if _logger is not None: d['log_level'] = _logger.getEffectiveLevel() if not WINEXE and not WINSERVICE: main_path = getattr(sys.modules['__main__'], '__file__', None) if not main_path and sys.argv[0] not in ('', '-c'): main_path = sys.argv[0] if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['main_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' old_main_modules.append(sys.modules['__main__']) if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process()._authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'main_path' in data: main_path = data['main_path'] main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == '__init__': main_name = os.path.basename(os.path.dirname(main_path)) if main_name == '__main__': main_module = sys.modules['__main__'] main_module.__file__ = main_path elif main_name != 'ipython': # Main modules not actually called __main__.py may # contain additional code that should still be executed import imp if main_path is None: dirs = None elif os.path.basename(main_path).startswith('__init__.py'): dirs = [os.path.dirname(os.path.dirname(main_path))] else: dirs = [os.path.dirname(main_path)] assert main_name not in sys.modules, main_name file, path_name, etc = imp.find_module(main_name, dirs) try: # We would like to do "imp.load_module('__main__', ...)" # here. However, that would cause 'if __name__ == # "__main__"' clauses to be executed. main_module = imp.load_module( '__parents_main__', file, path_name, etc ) finally: if file: file.close() sys.modules['__main__'] = main_module main_module.__name__ = '__main__' # Try to make the potentially picklable objects in # sys.modules['__main__'] realize they are in the main # module -- somewhat ugly. for obj in list(main_module.__dict__.values()): try: if obj.__module__ == '__parents_main__': obj.__module__ = '__main__' except Exception: pass billiard-3.3.0.15/billiard/heap.py0000644000076500000000000001753212270022117017210 0ustar asksolwheel00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import bisect import mmap import os import sys import threading import itertools from ._ext import _billiard, win32 from .util import Finalize, info, get_temp_dir from .forking import assert_spawning from .reduction import ForkingPickler __all__ = ['BufferWrapper'] try: maxsize = sys.maxsize except AttributeError: maxsize = sys.maxint # # Inheirtable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': class Arena(object): _counter = itertools.count() def __init__(self, size): self.size = size self.name = 'pym-%d-%d' % (os.getpid(), next(Arena._counter)) self.buffer = mmap.mmap(-1, self.size, tagname=self.name) assert win32.GetLastError() == 0, 'tagname already in use' self._state = (self.size, self.name) def __getstate__(self): assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state self.buffer = mmap.mmap(-1, self.size, tagname=self.name) assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS else: class Arena(object): _counter = itertools.count() def __init__(self, size, fileno=-1): from .forking import _forking_is_enabled self.size = size self.fileno = fileno if fileno == -1 and not _forking_is_enabled: name = os.path.join( get_temp_dir(), 'pym-%d-%d' % (os.getpid(), next(self._counter))) self.fileno = os.open( name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600) os.unlink(name) os.ftruncate(self.fileno, size) self.buffer = mmap.mmap(self.fileno, self.size) def reduce_arena(a): if a.fileno == -1: raise ValueError('Arena is unpicklable because' 'forking was enabled when it was created') return Arena, (a.size, a.fileno) ForkingPickler.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): _alignment = 8 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() self._size = size self._lengths = [] self._len_to_seq = {} self._start_to_block = {} self._stop_to_block = {} self._allocated_blocks = set() self._arenas = [] # list of pending blocks to free - see free() comment below self._pending_free_blocks = [] @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): length = self._roundup(max(self._size, size), mmap.PAGESIZE) self._size *= 2 info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _free(self, block): # free location and try to merge with neighbours (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held while 1: try: block = self._pending_free_blocks.pop() except IndexError: break self._allocated_blocks.remove(block) self._free(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under cPython it's atomic thanks # to the GIL). assert os.getpid() == self._lastpid if not self._lock.acquire(False): # can't aquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._free_pending_blocks() self._allocated_blocks.remove(block) self._free(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) assert 0 <= size < maxsize if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork self._lock.acquire() self._free_pending_blocks() try: size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) new_stop = start + size if new_stop < stop: self._free((arena, new_stop, stop)) block = (arena, start, new_stop) self._allocated_blocks.add(block) return block finally: self._lock.release() # # Class representing a chunk of an mmap -- can be inherited # class BufferWrapper(object): _heap = Heap() def __init__(self, size): assert 0 <= size < maxsize block = BufferWrapper._heap.malloc(size) self._state = (block, size) Finalize(self, BufferWrapper._heap.free, args=(block,)) def get_address(self): (arena, start, stop), size = self._state address, length = _billiard.address_of_buffer(arena.buffer) assert size <= length return address + start def get_size(self): return self._state[1] billiard-3.3.0.15/billiard/managers.py0000644000076500000000000010573212270022117020070 0ustar asksolwheel00000000000000# # Module providing the `SyncManager` class for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import __all__ = ['BaseManager', 'SyncManager', 'BaseProxy', 'Token'] # # Imports # import sys import threading import array from traceback import format_exc from . import Process, current_process, active_children, Pool, util, connection from .five import Queue, items, monotonic from .process import AuthenticationString from .forking import exit, Popen from .reduction import ForkingPickler from .util import Finalize, error, info # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tostring()) ForkingPickler.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items', 'keys', 'values')] if view_types[0] is not list: # only needed in Py3.0 def rebuild_as_list(obj): return list, (list(obj), ) for view_type in view_types: ForkingPickler.register(view_type, rebuild_as_list) try: import copyreg except ImportError: pass else: copyreg.pickle(view_type, rebuild_as_list) # # Type for identifying shared objects # class Token(object): ''' Type to uniquely indentify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return 'Token(typeid=%r, address=%r, id=%r)' % \ (self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind == '#TRACEBACK': assert type(result) is str return RemoteError(result) elif kind == '#UNSERIALIZABLE': assert type(result) is str return RemoteError('Unserializable message: %s\n' % result) else: return ValueError('Unrecognized message type') class RemoteError(Exception): def __str__(self): return ('\n' + '-' * 75 + '\n' + str(self.args[0]) + '-' * 75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): assert isinstance(authkey, bytes) self.registry = registry self.authkey = AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.mutex = threading.RLock() self.stop = 0 def serve_forever(self): ''' Run the server forever ''' current_process()._manager_server = self try: try: while 1: try: c = self.listener.accept() except (OSError, IOError): continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() except (KeyboardInterrupt, SystemExit): pass finally: self.stop = 999 self.listener.close() def handle_request(self, c): ''' Handle a new connection ''' funcname = result = request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as exc: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass info('Failure to send message: %r', msg) info(' ... request was %r', request) info(' ... exception was %r', exc) c.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.currentThread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop: try: methodname = obj = None request = recv() ident, methodname, args, kwds = request obj, exposed, gettypeid = id_to_obj[ident] if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % ( methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as exc: msg = ('#ERROR', exc) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.currentThread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception: send(('#UNSERIALIZABLE', repr(msg))) except Exception as exc: info('exception in thread serving %r', threading.currentThread().name) info(' ... message was %r', msg) info(' ... exception was %r', exc) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__': fallback_str, '__repr__': fallback_repr, '#GETVALUE': fallback_getvalue, } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' with self.mutex: result = [] keys = list(self.id_to_obj.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' return len(self.id_to_obj) - 1 # don't count ident='0' def shutdown(self, c): ''' Shutdown this process ''' try: try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) if sys.stdout != sys.__stdout__: util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ util._run_finalizers(0) for p in active_children(): util.debug('terminating a child process of manager') p.terminate() for p in active_children(): util.debug('terminating a child process of manager') p.join() util._run_finalizers() info('manager exiting with exitcode 0') except: if not error("Error while manager shutdown", exc_info=True): import traceback traceback.print_exc() finally: exit(0) def create(self, c, typeid, *args, **kwds): ''' Create a new shared object and return its id ''' with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: assert len(args) == 1 and not kwds obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: assert type(method_to_typeid) is dict exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 # increment the reference count immediately, to avoid # this object being garbage collected before a Proxy # object for it can be created. The caller of create() # is responsible for doing a decref once the Proxy object # has been created. self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.currentThread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: self.id_to_refcount[ident] += 1 def decref(self, c, ident): with self.mutex: assert self.id_to_refcount[ident] >= 1 self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_obj[ident], self.id_to_refcount[ident] util.debug('disposing of obj with id %r', ident) # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { 'pickle': (connection.Listener, connection.Client), 'xmlrpclib': (connection.XmlListener, connection.XmlClient), } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle'): if authkey is None: authkey = current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] def __reduce__(self): return (type(self).from_address, (self._address, self._authkey, self._serializer)) def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' assert self._state.value == State.INITIAL return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' assert self._state.value == State.INITIAL if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address writer.send(server.address) writer.close() # run the manager info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,) + args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' self._process.join(timeout) def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=0.2) if process.is_alive(): info('manager still alive') if hasattr(process, 'terminate'): info('trying to `terminate()` manager process') process.terminate() process.join(timeout=0.1) if process.is_alive(): info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass address = property(lambda self: self._address) @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = ( method_to_typeid or getattr(proxytype, '_method_to_typeid_', None) ) if method_to_typeid: for key, value in items(method_to_typeid): assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True): BaseProxy._mutex.acquire() try: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset finally: BaseProxy._mutex.release() # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] if authkey is not None: self._authkey = AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = current_process().name if threading.currentThread().name != 'MainThread': name += '|' + threading.currentThread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referrent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.currentThread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as exc: util.debug('... decref failed %s', exc) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.currentThread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as exc: # the proxy may just be for a manager which has shutdown info('incref failed: %s', exc) def __reduce__(self): kwds = {} if Popen.thread_is_spawning(): kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %s>' % \ (type(self).__name__, self._token.typeid, '0x%x' % id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. If possible the shared object is returned, or otherwise a proxy for it. ''' server = getattr(current_process(), '_manager_server', None) if server and server.address == token.address: return server.id_to_obj[token.id][0] else: incref = ( kwds.pop('incref', True) and not getattr(current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return an proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return 'Namespace(%s)' % str.join(', ', temp) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)' % (type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): if sys.version_info[0] == 3: _exposed = ('__next__', 'send', 'throw', 'close') else: _exposed_ = ('__next__', 'next', 'send', 'throw', 'close') def next(self, *args): return self._callmethod('next', args) def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True): return self._callmethod('acquire', (blocking,)) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self): return self._callmethod('notify') def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = monotonic() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - monotonic() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__delslice__', '__getitem__', '__getslice__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', '__setslice__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__', )) # XXX __getslice__ and __setslice__ unneeded in Py3.0 class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values', )) ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__', )) # XXX __getslice__ and __setslice__ unneeded in Py3.0 PoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) PoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator', } # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `billiard.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', Queue) SyncManager.register('JoinableQueue', Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Pool', Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) billiard-3.3.0.15/billiard/pool.py0000644000076500000000000017424612274745466017301 0ustar asksolwheel00000000000000# -*- coding: utf-8 -*- # # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import # # Imports # import errno import itertools import os import platform import signal import sys import threading import time import warnings from collections import deque from functools import partial from . import Event, Process, cpu_count from . import util from .common import pickle_loads, reset_signals, restart_state from .compat import get_errno, send_offset from .einfo import ExceptionInfo from .exceptions import ( CoroStop, RestartFreqExceeded, SoftTimeLimitExceeded, Terminated, TimeLimitExceeded, TimeoutError, WorkerLostError, ) from .five import Empty, Queue, range, values, reraise, monotonic from .util import Finalize, debug PY3 = sys.version_info[0] == 3 if platform.system() == 'Windows': # pragma: no cover # On Windows os.kill calls TerminateProcess which cannot be # handled by # any process, so this is needed to terminate the task # *and its children* (if any). from ._win import kill_processtree as _kill # noqa else: from os import kill as _kill # noqa try: TIMEOUT_MAX = threading.TIMEOUT_MAX except AttributeError: # pragma: no cover TIMEOUT_MAX = 1e10 # noqa if sys.version_info >= (3, 3): _Semaphore = threading.Semaphore else: # Semaphore is a factory function pointing to _Semaphore _Semaphore = threading._Semaphore # noqa SIGMAP = dict( (getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG') ) # # Constants representing the state of a pool # RUN = 0 CLOSE = 1 TERMINATE = 2 # # Constants representing the state of a job # ACK = 0 READY = 1 TASK = 2 NACK = 3 DEATH = 4 # # Exit code constants # EX_OK = 0 EX_FAILURE = 1 EX_RECYCLE = 0x9B # Signal used for soft time limits. SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None) # # Miscellaneous # LOST_WORKER_TIMEOUT = 10.0 EX_OK = getattr(os, "EX_OK", 0) job_counter = itertools.count() Lock = threading.Lock def _get_send_offset(connection): try: native = connection.send_offset except AttributeError: native = None if native is None: return partial(send_offset, connection.fileno()) return native def human_status(status): if status < 0: try: return 'signal {0} ({1})'.format(-status, SIGMAP[-status]) except KeyError: return 'signal {0}'.format(-status) return 'exitcode {0}'.format(status) def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) def error(msg, *args, **kwargs): if util._logger: util._logger.error(msg, *args, **kwargs) def stop_if_not_current(thread, timeout=None): if thread is not threading.currentThread(): thread.stop(timeout) class LaxBoundedSemaphore(_Semaphore): """Semaphore that checks that # release is <= # acquires, but ignores if # releases >= value.""" def __init__(self, value=1, verbose=None): if PY3: _Semaphore.__init__(self, value) else: _Semaphore.__init__(self, value, verbose) self._initial_value = value def grow(self): if PY3: cond = self._cond else: cond = self._Semaphore__cond with cond: self._initial_value += 1 self._Semaphore__value += 1 cond.notify() def shrink(self): self._initial_value -= 1 self.acquire() if PY3: def release(self): cond = self._cond with cond: if self._value < self._initial_value: self._value += 1 cond.notify_all() def clear(self): while self._value < self._initial_value: _Semaphore.release(self) else: def release(self): # noqa cond = self._Semaphore__cond with cond: if self._Semaphore__value < self._initial_value: self._Semaphore__value += 1 cond.notifyAll() def clear(self): # noqa while self._Semaphore__value < self._initial_value: _Semaphore.release(self) # # Exceptions # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __repr__(self): return "" % str(self) def __str__(self): return "Error sending result: '%r'. Reason: '%r'." % ( self.value, self.exc) class WorkersJoined(Exception): """All workers have terminated.""" def soft_timeout_sighandler(signum, frame): raise SoftTimeLimitExceeded() # # Code run by worker processes # class Worker(Process): _controlled_termination = False _job_terminated = False def __init__(self, inq, outq, synq=None, initializer=None, initargs=(), maxtasks=None, sentinel=None, on_exit=None, sigprotection=True): assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0) self.initializer = initializer self.initargs = initargs self.maxtasks = maxtasks self._shutdown = sentinel self.on_exit = on_exit self.sigprotection = sigprotection self.inq, self.outq, self.synq = inq, outq, synq self._make_shortcuts() super(Worker, self).__init__() def __reduce__(self): return self.__class__, ( self.inq, self.outq, self.synq, self.initializer, self.initargs, self.maxtasks, self._shutdown, ) def _make_shortcuts(self): self.inqW_fd = self.inq._writer.fileno() # inqueue write fd self.outqR_fd = self.outq._reader.fileno() # outqueue read fd if self.synq: self.synqR_fd = self.synq._reader.fileno() # synqueue read fd self.synqW_fd = self.synq._writer.fileno() # synqueue write fd self.send_syn_offset = _get_send_offset(self.synq._writer) else: self.synqR_fd = self.synqW_fd = self._send_syn_offset = None self._quick_put = self.inq._writer.send self._quick_get = self.outq._reader.recv self.send_job_offset = _get_send_offset(self.inq._writer) def run(self): _exit = sys.exit _exitcode = [None] def exit(status=None): _exitcode[0] = status return _exit() sys.exit = exit pid = os.getpid() self._make_child_methods() self.after_fork() self.on_loop_start(pid=pid) # callback on loop start try: sys.exit(self.workloop(pid=pid)) except Exception as exc: error('Pool process %r error: %r', self, exc, exc_info=1) self._do_exit(pid, _exitcode[0], exc) finally: self._do_exit(pid, _exitcode[0], None) def _do_exit(self, pid, exitcode, exc=None): if exitcode is None: exitcode = EX_FAILURE if exc else EX_OK if self.on_exit is not None: self.on_exit(pid, exitcode) if sys.platform != 'win32': try: self.outq.put((DEATH, (pid, exitcode))) time.sleep(1) finally: os._exit(exitcode) else: os._exit(exitcode) def on_loop_start(self, pid): pass def terminate_controlled(self): self._controlled_termination = True self.terminate() def prepare_result(self, result): return result def workloop(self, debug=debug, now=monotonic, pid=None): pid = pid or os.getpid() put = self.outq.put inqW_fd = self.inqW_fd synqW_fd = self.synqW_fd maxtasks = self.maxtasks prepare_result = self.prepare_result wait_for_job = self.wait_for_job _wait_for_syn = self.wait_for_syn def wait_for_syn(jid): i = 0 while 1: if i > 60: error('!!!WAIT FOR ACK TIMEOUT: job:%r fd:%r!!!', jid, self.synq._reader.fileno(), exc_info=1) req = _wait_for_syn() if req: type_, args = req if type_ == NACK: return False assert type_ == ACK return True i += 1 completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): req = wait_for_job() if req: type_, args_ = req assert type_ == TASK job, i, fun, args, kwargs = args_ put((ACK, (job, i, now(), pid, synqW_fd))) if _wait_for_syn: confirm = wait_for_syn(job) if not confirm: continue # received NACK try: result = (True, prepare_result(fun(*args, **kwargs))) except Exception: result = (False, ExceptionInfo()) try: put((READY, (job, i, result, inqW_fd))) except Exception as exc: _, _, tb = sys.exc_info() try: wrapped = MaybeEncodingError(exc, result[1]) einfo = ExceptionInfo(( MaybeEncodingError, wrapped, tb, )) put((READY, (job, i, (False, einfo), inqW_fd))) finally: del(tb) completed += 1 debug('worker exiting after %d tasks', completed) if maxtasks: return EX_RECYCLE if completed == maxtasks else EX_FAILURE return EX_OK def after_fork(self): if hasattr(self.inq, '_writer'): self.inq._writer.close() if hasattr(self.outq, '_reader'): self.outq._reader.close() if self.initializer is not None: self.initializer(*self.initargs) # Make sure all exiting signals call finally: blocks. # This is important for the semaphore to be released. reset_signals(full=self.sigprotection) # install signal handler for soft timeouts. if SIG_SOFT_TIMEOUT is not None: signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler) try: signal.signal(signal.SIGINT, signal.SIG_IGN) except AttributeError: pass def _make_recv_method(self, conn): get = conn.get if hasattr(conn, '_reader'): _poll = conn._reader.poll if hasattr(conn, 'get_payload') and conn.get_payload: get_payload = conn.get_payload def _recv(timeout, loads=pickle_loads): return True, loads(get_payload()) else: def _recv(timeout): # noqa if _poll(timeout): return True, get() return False, None else: def _recv(timeout): # noqa try: return True, get(timeout=timeout) except Queue.Empty: return False, None return _recv def _make_child_methods(self, loads=pickle_loads): self.wait_for_job = self._make_protected_receive(self.inq) self.wait_for_syn = (self._make_protected_receive(self.synq) if self.synq else None) def _make_protected_receive(self, conn): _receive = self._make_recv_method(conn) should_shutdown = self._shutdown.is_set if self._shutdown else None def receive(debug=debug): if should_shutdown and should_shutdown(): debug('worker got sentinel -- exiting') raise SystemExit(EX_OK) try: ready, req = _receive(1.0) if not ready: return None except (EOFError, IOError) as exc: if get_errno(exc) == errno.EINTR: return None # interrupted, maybe by gdb debug('worker got %s -- exiting', type(exc).__name__) raise SystemExit(EX_FAILURE) if req is None: debug('worker got sentinel -- exiting') raise SystemExit(EX_FAILURE) return req return receive # # Class representing a process pool # class PoolThread(threading.Thread): def __init__(self, *args, **kwargs): threading.Thread.__init__(self) self._state = RUN self._was_started = False self.daemon = True def run(self): try: return self.body() except RestartFreqExceeded as exc: error("Thread %r crashed: %r", type(self).__name__, exc, exc_info=1) _kill(os.getpid(), signal.SIGTERM) sys.exit() except Exception as exc: error("Thread %r crashed: %r", type(self).__name__, exc, exc_info=1) os._exit(1) def start(self, *args, **kwargs): self._was_started = True super(PoolThread, self).start(*args, **kwargs) def on_stop_not_started(self): pass def stop(self, timeout=None): if self._was_started: self.join(timeout) return self.on_stop_not_started() def terminate(self): self._state = TERMINATE def close(self): self._state = CLOSE class Supervisor(PoolThread): def __init__(self, pool): self.pool = pool super(Supervisor, self).__init__() def body(self): debug('worker handler starting') time.sleep(0.8) pool = self.pool try: # do a burst at startup to verify that we can start # our pool processes, and in that time we lower # the max restart frequency. prev_state = pool.restart_state pool.restart_state = restart_state(10 * pool._processes, 1) for _ in range(10): if self._state == RUN and pool._state == RUN: pool._maintain_pool() time.sleep(0.1) # Keep maintaing workers until the cache gets drained, unless # the pool is termianted pool.restart_state = prev_state while self._state == RUN and pool._state == RUN: pool._maintain_pool() time.sleep(0.8) except RestartFreqExceeded: pool.close() pool.join() raise debug('worker handler exiting') class TaskHandler(PoolThread): def __init__(self, taskqueue, put, outqueue, pool): self.taskqueue = taskqueue self.put = put self.outqueue = outqueue self.pool = pool super(TaskHandler, self).__init__() def body(self): taskqueue = self.taskqueue put = self.put for taskseq, set_length in iter(taskqueue.get, None): try: i = -1 for i, task in enumerate(taskseq): if self._state: debug('task handler found thread._state != RUN') break try: put(task) except IOError: debug('could not put task on queue') break else: if set_length: debug('doing set_length()') set_length(i + 1) continue break except Exception as exc: error('Task Handler ERROR: %r', exc, exc_info=1) break else: debug('task handler got sentinel') self.tell_others() def tell_others(self): outqueue = self.outqueue put = self.put pool = self.pool try: # tell result handler to finish when cache is empty debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work debug('task handler sending sentinel to workers') for p in pool: put(None) except IOError: debug('task handler got IOError when sending sentinels') debug('task handler exiting') def on_stop_not_started(self): self.tell_others() class TimeoutHandler(PoolThread): def __init__(self, processes, cache, t_soft, t_hard): self.processes = processes self.cache = cache self.t_soft = t_soft self.t_hard = t_hard self._it = None super(TimeoutHandler, self).__init__() def _process_by_pid(self, pid): return next(( (proc, i) for i, proc in enumerate(self.processes) if proc.pid == pid ), (None, None)) def on_soft_timeout(self, job): debug('soft time limit exceeded for %r', job) process, _index = self._process_by_pid(job._worker_pid) if not process: return # Run timeout callback if job._timeout_callback is not None: job._timeout_callback(soft=True, timeout=job._soft_timeout) try: _kill(job._worker_pid, SIG_SOFT_TIMEOUT) except OSError as exc: if get_errno(exc) != errno.ESRCH: raise def on_hard_timeout(self, job): if job.ready(): return debug('hard time limit exceeded for %r', job) # Remove from cache and set return value to an exception try: raise TimeLimitExceeded(job._timeout) except TimeLimitExceeded: job._set(job._job, (False, ExceptionInfo())) else: # pragma: no cover pass # Remove from _pool process, _index = self._process_by_pid(job._worker_pid) # Run timeout callback if job._timeout_callback is not None: job._timeout_callback(soft=False, timeout=job._timeout) if process: self._trywaitkill(process) def _trywaitkill(self, worker): debug('timeout: sending TERM to %s', worker._name) try: worker.terminate() except OSError: pass else: if worker._popen.wait(timeout=0.1): return debug('timeout: TERM timed-out, now sending KILL to %s', worker._name) try: _kill(worker.pid, signal.SIGKILL) except OSError: pass def handle_timeouts(self): cache = self.cache t_hard, t_soft = self.t_hard, self.t_soft dirty = set() on_soft_timeout = self.on_soft_timeout on_hard_timeout = self.on_hard_timeout def _timed_out(start, timeout): if not start or not timeout: return False if monotonic() >= start + timeout: return True # Inner-loop while self._state == RUN: # Remove dirty items not in cache anymore if dirty: dirty = set(k for k in dirty if k in cache) for i, job in list(cache.items()): ack_time = job._time_accepted soft_timeout = job._soft_timeout if soft_timeout is None: soft_timeout = t_soft hard_timeout = job._timeout if hard_timeout is None: hard_timeout = t_hard if _timed_out(ack_time, hard_timeout): on_hard_timeout(job) elif i not in dirty and _timed_out(ack_time, soft_timeout): on_soft_timeout(job) dirty.add(i) yield def body(self): while self._state == RUN: try: for _ in self.handle_timeouts(): time.sleep(1.0) # don't spin except CoroStop: break debug('timeout handler exiting') def handle_event(self, *args): if self._it is None: self._it = self.handle_timeouts() try: next(self._it) except StopIteration: self._it = None class ResultHandler(PoolThread): def __init__(self, outqueue, get, cache, poll, join_exited_workers, putlock, restart_state, check_timeouts, on_job_ready): self.outqueue = outqueue self.get = get self.cache = cache self.poll = poll self.join_exited_workers = join_exited_workers self.putlock = putlock self.restart_state = restart_state self._it = None self._shutdown_complete = False self.check_timeouts = check_timeouts self.on_job_ready = on_job_ready self._make_methods() super(ResultHandler, self).__init__() def on_stop_not_started(self): # used when pool started without result handler thread. self.finish_at_shutdown(handle_timeouts=True) def _make_methods(self): cache = self.cache putlock = self.putlock restart_state = self.restart_state on_job_ready = self.on_job_ready def on_ack(job, i, time_accepted, pid, synqW_fd): restart_state.R = 0 try: cache[job]._ack(i, time_accepted, pid, synqW_fd) except (KeyError, AttributeError): # Object gone or doesn't support _ack (e.g. IMAPIterator). pass def on_ready(job, i, obj, inqW_fd): if on_job_ready is not None: on_job_ready(job, i, obj, inqW_fd) try: item = cache[job] except KeyError: return if not item.ready(): if putlock is not None: putlock.release() try: item._set(i, obj) except KeyError: pass def on_death(pid, exitcode): try: os.kill(pid, signal.SIGTERM) except OSError as exc: if get_errno(exc) != errno.ESRCH: raise state_handlers = self.state_handlers = { ACK: on_ack, READY: on_ready, DEATH: on_death } def on_state_change(task): state, args = task try: state_handlers[state](*args) except KeyError: debug("Unknown job state: %s (args=%s)", state, args) self.on_state_change = on_state_change def _process_result(self, timeout=1.0): poll = self.poll on_state_change = self.on_state_change while 1: try: ready, task = poll(timeout) except (IOError, EOFError) as exc: debug('result handler got %r -- exiting', exc) raise CoroStop() if self._state: assert self._state == TERMINATE debug('result handler found thread._state=TERMINATE') raise CoroStop() if ready: if task is None: debug('result handler got sentinel') raise CoroStop() on_state_change(task) if timeout != 0: # blocking break else: break yield def handle_event(self, fileno=None, events=None): if self._state == RUN: if self._it is None: self._it = self._process_result(0) # non-blocking try: next(self._it) except (StopIteration, CoroStop): self._it = None def body(self): debug('result handler starting') try: while self._state == RUN: try: for _ in self._process_result(1.0): # blocking pass except CoroStop: break finally: self.finish_at_shutdown() def finish_at_shutdown(self, handle_timeouts=False): self._shutdown_complete = True get = self.get outqueue = self.outqueue cache = self.cache poll = self.poll join_exited_workers = self.join_exited_workers check_timeouts = self.check_timeouts on_state_change = self.on_state_change time_terminate = None while cache and self._state != TERMINATE: if check_timeouts is not None: check_timeouts() try: ready, task = poll(1.0) except (IOError, EOFError) as exc: debug('result handler got %r -- exiting', exc) return if ready: if task is None: debug('result handler ignoring extra sentinel') continue on_state_change(task) try: join_exited_workers(shutdown=True) except WorkersJoined: now = monotonic() if not time_terminate: time_terminate = now else: if now - time_terminate > 5.0: debug('result handler exiting: timed out') break debug('result handler: all workers terminated, ' 'timeout in %ss', abs(min(now - time_terminate - 5.0, 0))) if hasattr(outqueue, '_reader'): debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. try: for i in range(10): if not outqueue._reader.poll(): break get() except (IOError, EOFError): pass debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), self._state) class Pool(object): ''' Class which supports an async version of applying functions to arguments. ''' Worker = Worker Supervisor = Supervisor TaskHandler = TaskHandler TimeoutHandler = TimeoutHandler ResultHandler = ResultHandler SoftTimeLimitExceeded = SoftTimeLimitExceeded def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, timeout=None, soft_timeout=None, lost_worker_timeout=None, max_restarts=None, max_restart_freq=1, on_process_up=None, on_process_down=None, on_timeout_set=None, on_timeout_cancel=None, threads=True, semaphore=None, putlocks=False, allow_restart=False, synack=False, on_process_exit=None, **kwargs): self.synack = synack self._setup_queues() self._taskqueue = Queue() self._cache = {} self._state = RUN self.timeout = timeout self.soft_timeout = soft_timeout self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs self._on_process_exit = on_process_exit self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT self.on_process_up = on_process_up self.on_process_down = on_process_down self.on_timeout_set = on_timeout_set self.on_timeout_cancel = on_timeout_cancel self.threads = threads self.readers = {} self.allow_restart = allow_restart if soft_timeout and SIG_SOFT_TIMEOUT is None: warnings.warn(UserWarning( "Soft timeouts are not supported: " "on this platform: It does not have the SIGUSR1 signal.", )) soft_timeout = None self._processes = self.cpu_count() if processes is None else processes self.max_restarts = max_restarts or round(self._processes * 100) self.restart_state = restart_state(max_restarts, max_restart_freq or 1) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') if on_process_exit is not None and not callable(on_process_exit): raise TypeError('on_process_exit must be callable') self._pool = [] self._poolctrl = {} self.putlocks = putlocks self._putlock = semaphore or LaxBoundedSemaphore(self._processes) for i in range(self._processes): self._create_worker_process(i) self._worker_handler = self.Supervisor(self) if threads: self._worker_handler.start() self._task_handler = self.TaskHandler(self._taskqueue, self._quick_put, self._outqueue, self._pool) if threads: self._task_handler.start() # Thread killing timedout jobs. self._timeout_handler = self.TimeoutHandler( self._pool, self._cache, self.soft_timeout, self.timeout, ) self._timeout_handler_mutex = Lock() self._timeout_handler_started = False if self.timeout is not None or self.soft_timeout is not None: self._start_timeout_handler() # If running without threads, we need to check for timeouts # while waiting for unfinished work at shutdown. self.check_timeouts = None if not threads: self.check_timeouts = self._timeout_handler.handle_event # Thread processing results in the outqueue. self._result_handler = self.create_result_handler() self.handle_result_event = self._result_handler.handle_event if threads: self._result_handler.start() self._terminate = Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._worker_handler, self._task_handler, self._result_handler, self._cache, self._timeout_handler, self._help_stuff_finish_args()), exitpriority=15, ) def create_result_handler(self, **extra_kwargs): return self.ResultHandler( self._outqueue, self._quick_get, self._cache, self._poll_result, self._join_exited_workers, self._putlock, self.restart_state, self.check_timeouts, self.on_job_ready, **extra_kwargs ) def on_job_ready(self, job, i, obj, inqW_fd): pass def _help_stuff_finish_args(self): return self._inqueue, self._task_handler, self._pool def cpu_count(self): try: return cpu_count() except NotImplementedError: return 1 def handle_result_event(self, *args): return self._result_handler.handle_event(*args) def _process_register_queues(self, worker, queues): pass def _process_by_pid(self, pid): return next(( (proc, i) for i, proc in enumerate(self._pool) if proc.pid == pid ), (None, None)) def get_process_queues(self): return self._inqueue, self._outqueue, None def _create_worker_process(self, i): sentinel = Event() if self.allow_restart else None inq, outq, synq = self.get_process_queues() w = self.Worker( inq, outq, synq, self._initializer, self._initargs, self._maxtasksperchild, sentinel, self._on_process_exit, # Need to handle all signals if using the ipc semaphore, # to make sure the semaphore is released. sigprotection=self.threads, ) self._pool.append(w) self._process_register_queues(w, (inq, outq, synq)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.index = i w.start() self._poolctrl[w.pid] = sentinel if self.on_process_up: self.on_process_up(w) return w def process_flush_queues(self, worker): pass def _join_exited_workers(self, shutdown=False): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. """ now = None # The worker may have published a result before being terminated, # but we have no way to accurately tell if it did. So we wait for # _lost_worker_timeout seconds before we mark the job with # WorkerLostError. for job in [job for job in list(self._cache.values()) if not job.ready() and job._worker_lost]: now = now or monotonic() lost_time, lost_ret = job._worker_lost if now - lost_time > job._lost_worker_timeout: self.mark_as_worker_lost(job, lost_ret) if shutdown and not len(self._pool): raise WorkersJoined() cleaned, exitcodes = {}, {} for i in reversed(range(len(self._pool))): worker = self._pool[i] exitcode = worker.exitcode popen = worker._popen if popen is None or exitcode is not None: # worker exited debug('Supervisor: cleaning up worker %d', i) if popen is not None: worker.join() debug('Supervisor: worked %d joined', i) cleaned[worker.pid] = worker exitcodes[worker.pid] = exitcode if exitcode not in (EX_OK, EX_RECYCLE) and \ not getattr(worker, '_controlled_termination', False): error( 'Process %r pid:%r exited with exitcode %r', worker.name, worker.pid, exitcode, exc_info=0, ) self.process_flush_queues(worker) del self._pool[i] del self._poolctrl[worker.pid] if cleaned: all_pids = [w.pid for w in self._pool] for job in list(self._cache.values()): acked_by_gone = next( (pid for pid in job.worker_pids() if pid in cleaned or pid not in all_pids), None ) # already accepted by process if acked_by_gone: self.on_job_process_down(job, acked_by_gone) if not job.ready(): exitcode = exitcodes.get(acked_by_gone) or 0 proc = cleaned.get(acked_by_gone) if proc and getattr(proc, '_job_terminated', False): job._set_terminated(exitcode) else: self.on_job_process_lost( job, acked_by_gone, exitcode, ) else: # started writing to write_to = job._write_to # was scheduled to write to sched_for = job._scheduled_for if write_to and not write_to._is_alive(): self.on_job_process_down(job, write_to.pid) elif sched_for and not sched_for._is_alive(): self.on_job_process_down(job, sched_for.pid) for worker in values(cleaned): if self.on_process_down: if not shutdown: self._process_cleanup_queues(worker) self.on_process_down(worker) return list(exitcodes.values()) return [] def on_partial_read(self, job, worker): pass def _process_cleanup_queues(self, worker): pass def on_job_process_down(self, job, pid_gone): pass def on_job_process_lost(self, job, pid, exitcode): job._worker_lost = (monotonic(), exitcode) def mark_as_worker_lost(self, job, exitcode): try: raise WorkerLostError( 'Worker exited prematurely: {0}.'.format( human_status(exitcode)), ) except WorkerLostError: job._set(None, (False, ExceptionInfo())) else: # pragma: no cover pass def __enter__(self): return self def __exit__(self, *exc_info): return self.terminate() def on_grow(self, n): pass def on_shrink(self, n): pass def shrink(self, n=1): for i, worker in enumerate(self._iterinactive()): self._processes -= 1 if self._putlock: self._putlock.shrink() worker.terminate_controlled() self.on_shrink(1) if i == n - 1: return raise ValueError("Can't shrink pool. All processes busy!") def grow(self, n=1): for i in range(n): self._processes += 1 if self._putlock: self._putlock.grow() self.on_grow(n) def _iterinactive(self): for worker in self._pool: if not self._worker_active(worker): yield worker raise StopIteration() def _worker_active(self, worker): for job in values(self._cache): if worker.pid in job.worker_pids(): return True return False def _repopulate_pool(self, exitcodes): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(self._processes - len(self._pool)): if self._state != RUN: return try: if exitcodes and exitcodes[i] not in (EX_OK, EX_RECYCLE): self.restart_state.step() except IndexError: self.restart_state.step() self._create_worker_process(self._avail_index()) debug('added worker') def _avail_index(self): assert len(self._pool) < self._processes indices = set(p.index for p in self._pool) return next(i for i in range(self._processes) if i not in indices) def did_start_ok(self): return not self._join_exited_workers() def _maintain_pool(self): """"Clean up any exited workers and start replacements for them. """ joined = self._join_exited_workers() self._repopulate_pool(joined) for i in range(len(joined)): if self._putlock is not None: self._putlock.release() def maintain_pool(self): if self._worker_handler._state == RUN and self._state == RUN: try: self._maintain_pool() except RestartFreqExceeded: self.close() self.join() raise except OSError as exc: if get_errno(exc) == errno.ENOMEM: reraise(MemoryError, MemoryError(str(exc)), sys.exc_info()[2]) raise def _setup_queues(self): from billiard.queues import SimpleQueue self._inqueue = SimpleQueue() self._outqueue = SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _poll_result(timeout): if self._outqueue._reader.poll(timeout): return True, self._quick_get() return False, None self._poll_result = _poll_result def _start_timeout_handler(self): # ensure more than one thread does not start the timeout handler # thread at once. if self.threads: with self._timeout_handler_mutex: if not self._timeout_handler_started: self._timeout_handler_started = True self._timeout_handler.start() def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwargs)`. ''' if self._state == RUN: return self.apply_async(func, args, kwds).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' if self._state == RUN: return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' if self._state == RUN: return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' if self._state == RUN: return self.map_async(func, iterable, chunksize).get() def imap(self, func, iterable, chunksize=1, lost_worker_timeout=None): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. ''' if self._state != RUN: return lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout if chunksize == 1: result = IMapIterator(self._cache, lost_worker_timeout=lost_worker_timeout) self._taskqueue.put(( ((TASK, (result._job, i, func, (x,), {})) for i, x in enumerate(iterable)), result._set_length, )) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self._cache, lost_worker_timeout=lost_worker_timeout) self._taskqueue.put(( ((TASK, (result._job, i, mapstar, (x,), {})) for i, x in enumerate(task_batches)), result._set_length, )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1, lost_worker_timeout=None): ''' Like `imap()` method but ordering of results is arbitrary. ''' if self._state != RUN: return lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout if chunksize == 1: result = IMapUnorderedIterator( self._cache, lost_worker_timeout=lost_worker_timeout, ) self._taskqueue.put(( ((TASK, (result._job, i, func, (x,), {})) for i, x in enumerate(iterable)), result._set_length, )) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator( self._cache, lost_worker_timeout=lost_worker_timeout, ) self._taskqueue.put(( ((TASK, (result._job, i, mapstar, (x,), {})) for i, x in enumerate(task_batches)), result._set_length, )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None, accept_callback=None, timeout_callback=None, waitforslot=None, soft_timeout=None, timeout=None, lost_worker_timeout=None, callbacks_propagate=(), correlation_id=None): ''' Asynchronous equivalent of `apply()` method. Callback is called when the functions return value is ready. The accept callback is called when the job is accepted to be executed. Simplified the flow is like this: >>> def apply_async(func, args, kwds, callback, accept_callback): ... if accept_callback: ... accept_callback() ... retval = func(*args, **kwds) ... if callback: ... callback(retval) ''' if self._state != RUN: return soft_timeout = soft_timeout or self.soft_timeout timeout = timeout or self.timeout lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout if soft_timeout and SIG_SOFT_TIMEOUT is None: warnings.warn(UserWarning( "Soft timeouts are not supported: " "on this platform: It does not have the SIGUSR1 signal.", )) soft_timeout = None if self._state == RUN: waitforslot = self.putlocks if waitforslot is None else waitforslot if waitforslot and self._putlock is not None: self._putlock.acquire() result = ApplyResult( self._cache, callback, accept_callback, timeout_callback, error_callback, soft_timeout, timeout, lost_worker_timeout, on_timeout_set=self.on_timeout_set, on_timeout_cancel=self.on_timeout_cancel, callbacks_propagate=callbacks_propagate, send_ack=self.send_ack if self.synack else None, correlation_id=correlation_id, ) if timeout or soft_timeout: # start the timeout handler thread when required. self._start_timeout_handler() if self.threads: self._taskqueue.put(([(TASK, (result._job, None, func, args, kwds))], None)) else: self._quick_put((TASK, (result._job, None, func, args, kwds))) return result def send_ack(self, response, job, i, fd): pass def terminate_job(self, pid, sig=None): proc, _ = self._process_by_pid(pid) if proc is not None: try: _kill(pid, sig or signal.SIGTERM) except OSError as exc: if get_errno(exc) != errno.ESRCH: raise else: proc._controlled_termination = True proc._job_terminated = True def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous equivalent of `map()` method. ''' return self._map_async( func, iterable, mapstar, chunksize, callback, error_callback, ) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' if self._state != RUN: return if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self._cache, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {})) for i, x in enumerate(task_batches)), None)) return result @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled', ) def close(self): debug('closing pool') if self._state == RUN: self._state = CLOSE if self._putlock: self._putlock.clear() self._worker_handler.close() self._taskqueue.put(None) stop_if_not_current(self._worker_handler) def terminate(self): debug('terminating pool') self._state = TERMINATE self._worker_handler.terminate() self._terminate() @staticmethod def _stop_task_handler(task_handler): stop_if_not_current(task_handler) def join(self): assert self._state in (CLOSE, TERMINATE) debug('joining worker handler') stop_if_not_current(self._worker_handler) debug('joining task handler') self._stop_task_handler(self._task_handler) debug('joining result handler') stop_if_not_current(self._result_handler) debug('result handler joined') for i, p in enumerate(self._pool): debug('joining worker %s/%s (%r)', i+1, len(self._pool), p) if p._popen is not None: # process started? p.join() debug('pool join complete') def restart(self): for e in values(self._poolctrl): e.set() @staticmethod def _help_stuff_finish(inqueue, task_handler, _pool): # task_handler may be blocked trying to put items on inqueue debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _set_result_sentinel(cls, outqueue, pool): outqueue.put(None) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, worker_handler, task_handler, result_handler, cache, timeout_handler, help_stuff_finish_args): # this is guaranteed to only be called once debug('finalizing pool') worker_handler.terminate() task_handler.terminate() taskqueue.put(None) # sentinel debug('helping task handler/workers to finish') cls._help_stuff_finish(*help_stuff_finish_args) result_handler.terminate() cls._set_result_sentinel(outqueue, pool) if timeout_handler is not None: timeout_handler.terminate() # Terminate workers which haven't already finished if pool and hasattr(pool[0], 'terminate'): debug('terminating workers') for p in pool: if p._is_alive(): p.terminate() debug('joining task handler') cls._stop_task_handler(task_handler) debug('joining result handler') result_handler.stop() if timeout_handler is not None: debug('joining timeout handler') timeout_handler.stop(TIMEOUT_MAX) if pool and hasattr(pool[0], 'terminate'): debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited debug('cleaning up worker %d', p.pid) if p._popen is not None: p.join() debug('pool workers joined') @property def process_sentinels(self): return [w._popen.sentinel for w in self._pool] # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): _worker_lost = None _write_to = None _scheduled_for = None def __init__(self, cache, callback, accept_callback=None, timeout_callback=None, error_callback=None, soft_timeout=None, timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT, on_timeout_set=None, on_timeout_cancel=None, callbacks_propagate=(), send_ack=None, correlation_id=None): self.correlation_id = correlation_id self._mutex = Lock() self._event = threading.Event() self._job = next(job_counter) self._cache = cache self._callback = callback self._accept_callback = accept_callback self._error_callback = error_callback self._timeout_callback = timeout_callback self._timeout = timeout self._terminated = None self._soft_timeout = soft_timeout self._lost_worker_timeout = lost_worker_timeout self._on_timeout_set = on_timeout_set self._on_timeout_cancel = on_timeout_cancel self._callbacks_propagate = callbacks_propagate or () self._send_ack = send_ack self._accepted = False self._cancelled = False self._worker_pid = None self._time_accepted = None cache[self._job] = self def __repr__(self): return ''.format( id=self._job, ack=self._accepted, ready=self.ready(), ) def ready(self): return self._event.isSet() def accepted(self): return self._accepted def successful(self): assert self.ready() return self._success def _cancel(self): """Only works if synack is used.""" self._cancelled = True def discard(self): self._cache.pop(self._job, None) def terminate(self, signum): self._terminated = signum def _set_terminated(self, signum=None): try: raise Terminated(-(signum or 0)) except Terminated: self._set(None, (False, ExceptionInfo())) def worker_pids(self): return [self._worker_pid] if self._worker_pid else [] def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value.exception def safe_apply_callback(self, fun, *args): if fun: try: fun(*args) except self._callbacks_propagate: raise except Exception as exc: error('Pool callback raised exception: %r', exc, exc_info=1) def _set(self, i, obj): with self._mutex: if self._on_timeout_cancel: self._on_timeout_cancel(self) self._success, self._value = obj self._event.set() if self._accepted: # if not accepted yet, then the set message # was received before the ack, which means # the ack will remove the entry. self._cache.pop(self._job, None) # apply callbacks last if self._callback and self._success: self.safe_apply_callback( self._callback, self._value) if (self._value is not None and self._error_callback and not self._success): self.safe_apply_callback( self._error_callback, self._value) def _ack(self, i, time_accepted, pid, synqW_fd): with self._mutex: if self._cancelled and self._send_ack: self._accepted = True if synqW_fd: return self._send_ack(NACK, pid, self._job, synqW_fd) return self._accepted = True self._time_accepted = time_accepted self._worker_pid = pid if self.ready(): # ack received after set() self._cache.pop(self._job, None) if self._on_timeout_set: self._on_timeout_set(self, self._soft_timeout, self._timeout) response = ACK if self._accept_callback: try: self._accept_callback(pid, time_accepted) except self._propagate_errors: response = NACK raise except Exception: response = NACK # ignore other errors finally: if self._send_ack and synqW_fd: return self._send_ack( response, pid, self._job, synqW_fd ) if self._send_ack and synqW_fd: self._send_ack(response, pid, self._job, synqW_fd) # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, cache, chunksize, length, callback, error_callback): ApplyResult.__init__( self, cache, callback, error_callback=error_callback, ) self._success = True self._length = length self._value = [None] * length self._accepted = [False] * length self._worker_pid = [None] * length self._time_accepted = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del cache[self._job] else: self._number_left = length // chunksize + bool(length % chunksize) def _set(self, i, success_result): success, result = success_result if success: self._value[i * self._chunksize:(i + 1) * self._chunksize] = result self._number_left -= 1 if self._number_left == 0: if self._callback: self._callback(self._value) if self._accepted: self._cache.pop(self._job, None) self._event.set() else: self._success = False self._value = result if self._error_callback: self._error_callback(self._value) if self._accepted: self._cache.pop(self._job, None) self._event.set() def _ack(self, i, time_accepted, pid): start = i * self._chunksize stop = (i + 1) * self._chunksize for j in range(start, stop): self._accepted[j] = True self._worker_pid[j] = pid self._time_accepted[j] = time_accepted if self.ready(): self._cache.pop(self._job, None) def accepted(self): return all(self._accepted) def worker_pids(self): return [pid for pid in self._worker_pid if pid] # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): _worker_lost = None def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT): self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = cache self._items = deque() self._index = 0 self._length = None self._ready = False self._unsorted = {} self._worker_pids = [] self._lost_worker_timeout = lost_worker_timeout cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._ready = True raise StopIteration self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._ready = True raise StopIteration raise TimeoutError success, value = item if success: return value raise Exception(value) __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: self._ready = True del self._cache[self._job] def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._ready = True self._cond.notify() del self._cache[self._job] def _ack(self, i, time_accepted, pid): self._worker_pids.append(pid) def ready(self): return self._ready def worker_pids(self): return self._worker_pids # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: self._ready = True del self._cache[self._job] # # # class ThreadPool(Pool): from billiard.dummy import Process as DummyProcess Process = DummyProcess def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = Queue() self._outqueue = Queue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _poll_result(timeout): try: return True, self._quick_get(timeout=timeout) except Empty: return False, None self._poll_result = _poll_result @staticmethod def _help_stuff_finish(inqueue, task_handler, pool): # put sentinels at head of inqueue to make workers finish with inqueue.not_empty: inqueue.queue.clear() inqueue.queue.extend([None] * len(pool)) inqueue.not_empty.notify_all() billiard-3.3.0.15/billiard/process.py0000644000076500000000000002472712270022117017755 0ustar asksolwheel00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import __all__ = ['Process', 'current_process', 'active_children'] # # Imports # import os import sys import signal import itertools import binascii import logging import threading from multiprocessing import process as _mproc from .compat import bytes try: from _weakrefset import WeakSet except ImportError: WeakSet = None # noqa from .five import items, string_t try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def _set_current_process(process): global _current_process _current_process = _mproc._current_process = process def _cleanup(): # check for processes which have finished if _current_process is not None: for p in list(_current_process._children): if p._popen.poll() is not None: _current_process._children.discard(p) def _maybe_flush(f): try: f.flush() except (AttributeError, EnvironmentError, NotImplementedError): pass def active_children(_cleanup=_cleanup): ''' Return list of process objects corresponding to live child processes ''' try: _cleanup() except TypeError: # called after gc collect so _cleanup does not exist anymore return [] if _current_process is not None: return list(_current_process._children) return [] class Process(object): ''' Process objects represent activity that is run in a separate process The class is analagous to `threading.Thread` ''' _Popen = None def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, daemon=None, **_kw): assert group is None, 'group argument must be None for now' count = next(_current_process._counter) self._identity = _current_process._identity + (count,) self._authkey = _current_process._authkey if daemon is not None: self._daemonic = daemon else: self._daemonic = _current_process._daemonic self._tempdir = _current_process._tempdir self._semprefix = _current_process._semprefix self._unlinkfd = _current_process._unlinkfd self._parent_pid = os.getpid() self._popen = None self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = ( name or type(self).__name__ + '-' + ':'.join(str(i) for i in self._identity) ) if _dangling is not None: _dangling.add(self) def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' _cleanup() if self._Popen is not None: Popen = self._Popen else: from .forking import Popen self._popen = Popen(self) self._sentinel = self._popen.sentinel _current_process._children.add(self) def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._popen.terminate() def join(self, timeout=None): ''' Wait until child process terminates ''' assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _current_process._children.discard(self) def is_alive(self): ''' Return whether process is alive ''' if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False self._popen.poll() return self._popen.returncode is None def _is_alive(self): if self._popen is None: return False return self._popen.poll() is None def _get_name(self): return self._name def _set_name(self, value): assert isinstance(name, string_t), 'name must be a string' self._name = value name = property(_get_name, _set_name) def _get_daemon(self): return self._daemonic def _set_daemon(self, daemonic): assert self._popen is None, 'process has already started' self._daemonic = daemonic daemon = property(_get_daemon, _set_daemon) def _get_authkey(self): return self._authkey def _set_authkey(self, authkey): self._authkey = AuthenticationString(authkey) authkey = property(_get_authkey, _set_authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' try: return self._sentinel except AttributeError: raise ValueError("process not started") def __repr__(self): if self is _current_process: status = 'started' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: if self._popen.poll() is not None: status = self.exitcode else: status = 'started' if type(status) is int: if status == 0: status = 'stopped' else: status = 'stopped[%s]' % _exitcode_to_name.get(status, status) return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, status, self._daemonic and ' daemon' or '') ## def _bootstrap(self): from . import util global _current_process try: self._children = set() self._counter = itertools.count(1) if sys.stdin is not None: try: sys.stdin.close() sys.stdin = open(os.devnull) except (OSError, ValueError): pass old_process = _current_process _set_current_process(self) # Re-init logging system. # Workaround for http://bugs.python.org/issue6721/#msg140215 # Python logging module uses RLock() objects which are broken # after fork. This can result in a deadlock (Celery Issue #496). loggerDict = logging.Logger.manager.loggerDict logger_names = list(loggerDict.keys()) logger_names.append(None) # for root logger for name in logger_names: if not name or not isinstance(loggerDict[name], logging.PlaceHolder): for handler in logging.getLogger(name).handlers: handler.createLock() logging._lock = threading.RLock() try: util._finalizer_registry.clear() util._run_after_forkers() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process %s calling self.run()', self.pid) try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as exc: if not exc.args: exitcode = 1 elif isinstance(exc.args[0], int): exitcode = exc.args[0] else: sys.stderr.write(str(exc.args[0]) + '\n') _maybe_flush(sys.stderr) exitcode = 0 if isinstance(exc.args[0], str) else 1 except: exitcode = 1 if not util.error('Process %s', self.name, exc_info=True): import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: util.info('process %s exiting with exitcode %d', self.pid, exitcode) _maybe_flush(sys.stdout) _maybe_flush(sys.stderr) return exitcode # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .forking import Popen if not Popen.thread_is_spawning(): raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons') return AuthenticationString, (bytes(self),) # # Create object representing the main process # class _MainProcess(Process): def __init__(self): self._identity = () self._daemonic = False self._name = 'MainProcess' self._parent_pid = None self._popen = None self._counter = itertools.count(1) self._children = set() self._authkey = AuthenticationString(os.urandom(32)) self._tempdir = None self._semprefix = 'mp-' + binascii.hexlify( os.urandom(4)).decode('ascii') self._unlinkfd = None _current_process = _MainProcess() del _MainProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in items(signal.__dict__): if name[:3] == 'SIG' and '_' not in name: _exitcode_to_name[-signum] = name _dangling = WeakSet() if WeakSet is not None else None billiard-3.3.0.15/billiard/py2/0000755000076500000000000000000012276217622016440 5ustar asksolwheel00000000000000billiard-3.3.0.15/billiard/py2/__init__.py0000644000076500000000000000000012271740666020543 0ustar asksolwheel00000000000000billiard-3.3.0.15/billiard/py2/connection.py0000644000076500000000000003321612271741140021146 0ustar asksolwheel00000000000000# # A higher level module for using sockets (or Windows named pipes) # # multiprocessing/connection.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import __all__ = ['Client', 'Listener', 'Pipe'] import os import sys import socket import errno import time import tempfile import itertools from .. import AuthenticationError from .. import reduction from .._ext import _billiard, win32 from ..compat import get_errno, setblocking, bytes as cbytes from ..five import monotonic from ..forking import duplicate, close from ..reduction import ForkingPickler from ..util import get_temp_dir, Finalize, sub_debug, debug try: WindowsError = WindowsError # noqa except NameError: WindowsError = None # noqa # global set later xmlrpclib = None Connection = getattr(_billiard, 'Connection', None) PipeConnection = getattr(_billiard, 'PipeConnection', None) # # # BUFSIZE = 8192 # A very generous timeout when it comes to local connections... CONNECTION_TIMEOUT = 20. _mmap_counter = itertools.count() default_family = 'AF_INET' families = ['AF_INET'] if hasattr(socket, 'AF_UNIX'): default_family = 'AF_UNIX' families += ['AF_UNIX'] if sys.platform == 'win32': default_family = 'AF_PIPE' families += ['AF_PIPE'] def _init_timeout(timeout=CONNECTION_TIMEOUT): return monotonic() + timeout def _check_timeout(t): return monotonic() > t # # # def arbitrary_address(family): ''' Return an arbitrary free address for the given family ''' if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': return tempfile.mktemp(prefix='listener-', dir=get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter))) else: raise ValueError('unrecognized family') def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str: return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = (family or (address and address_type(address)) or default_family) address = address or arbitrary_address(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise IOError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. ''' if self._listener is not None: self._listener.close() self._listener = None address = property(lambda self: self._listener._address) last_accepted = property(lambda self: self._listener._last_accepted) def __enter__(self): return self def __exit__(self, *exc_args): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c if sys.platform != 'win32': def Pipe(duplex=True, rnonblock=False, wnonblock=False): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(not rnonblock) s2.setblocking(not wnonblock) c1 = Connection(os.dup(s1.fileno())) c2 = Connection(os.dup(s2.fileno())) s1.close() s2.close() else: fd1, fd2 = os.pipe() if rnonblock: setblocking(fd1, 0) if wnonblock: setblocking(fd2, 0) c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True, rnonblock=False, wnonblock=False): # noqa ''' Returns pair of connection objects at either end of a pipe ''' address = arbitrary_address('AF_PIPE') if duplex: openmode = win32.PIPE_ACCESS_DUPLEX access = win32.GENERIC_READ | win32.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = win32.PIPE_ACCESS_INBOUND access = win32.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = win32.CreateNamedPipe( address, openmode, win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE | win32.PIPE_WAIT, 1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL ) h2 = win32.CreateFile( address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL ) win32.SetNamedPipeHandleState( h2, win32.PIPE_READMODE_MESSAGE, None, None ) try: win32.ConnectNamedPipe(h1, win32.NULL) except WindowsError as exc: if exc.args[0] != win32.ERROR_PIPE_CONNECTED: raise c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (Issue #2550). if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX': self._unlink = Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): s, self._last_accepted = self._socket.accept() fd = duplicate(s.fileno()) conn = Connection(fd) s.close() return conn def close(self): self._socket.close() if self._unlink is not None: self._unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) s = socket.socket(getattr(socket, family)) t = _init_timeout() while 1: try: s.connect(address) except socket.error as exc: if get_errno(exc) != errno.ECONNREFUSED or _check_timeout(t): debug('failed to connect to address %s', address) raise time.sleep(0.01) else: break else: raise fd = duplicate(s.fileno()) conn = Connection(fd) s.close() return conn # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address handle = win32.CreateNamedPipe( address, win32.PIPE_ACCESS_DUPLEX, win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE | win32.PIPE_WAIT, win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, win32.NMPWAIT_WAIT_FOREVER, win32.NULL ) self._handle_queue = [handle] self._last_accepted = None sub_debug('listener created with address=%r', self._address) self.close = Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def accept(self): newhandle = win32.CreateNamedPipe( self._address, win32.PIPE_ACCESS_DUPLEX, win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE | win32.PIPE_WAIT, win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, win32.NMPWAIT_WAIT_FOREVER, win32.NULL ) self._handle_queue.append(newhandle) handle = self._handle_queue.pop(0) try: win32.ConnectNamedPipe(handle, win32.NULL) except WindowsError as exc: if exc.args[0] != win32.ERROR_PIPE_CONNECTED: raise return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): sub_debug('closing listener with address=%r', address) for handle in queue: close(handle) def PipeClient(address): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: win32.WaitNamedPipe(address, 1000) h = win32.CreateFile( address, win32.GENERIC_READ | win32.GENERIC_WRITE, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL, ) except WindowsError as exc: if exc.args[0] not in ( win32.ERROR_SEM_TIMEOUT, win32.ERROR_PIPE_BUSY) or _check_timeout(t): raise else: break else: raise win32.SetNamedPipeHandleState( h, win32.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = cbytes('#CHALLENGE#', 'ascii') WELCOME = cbytes('#WELCOME#', 'ascii') FAILURE = cbytes('#FAILURE#', 'ascii') def deliver_challenge(connection, authkey): import hmac assert isinstance(authkey, bytes) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message).digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac assert isinstance(authkey, bytes) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message).digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf8')) return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpclib # noqa obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpclib # noqa return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) if sys.platform == 'win32': ForkingPickler.register(socket.socket, reduction.reduce_socket) ForkingPickler.register(Connection, reduction.reduce_connection) ForkingPickler.register(PipeConnection, reduction.reduce_pipe_connection) else: ForkingPickler.register(socket.socket, reduction.reduce_socket) ForkingPickler.register(Connection, reduction.reduce_connection) billiard-3.3.0.15/billiard/py2/reduction.py0000644000076500000000000001436612271741210021006 0ustar asksolwheel00000000000000# # Module to allow connection and socket objects to be transferred # between processes # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import __all__ = [] import os import sys import socket import threading from pickle import Pickler from .. import current_process from .._ext import _billiard, win32 from ..util import register_after_fork, debug, sub_debug is_win32 = sys.platform == 'win32' is_pypy = hasattr(sys, 'pypy_version_info') is_py3k = sys.version_info[0] == 3 if not(is_win32 or is_pypy or is_py3k or hasattr(_billiard, 'recvfd')): raise ImportError('pickling of connections not supported') close = win32.CloseHandle if sys.platform == 'win32' else os.close # globals set later _listener = None _lock = None _cache = set() # # ForkingPickler # class ForkingPickler(Pickler): # noqa dispatch = Pickler.dispatch.copy() @classmethod def register(cls, type, reduce): def dispatcher(self, obj): rv = reduce(obj) self.save_reduce(obj=obj, *rv) cls.dispatch[type] = dispatcher def _reduce_method(m): # noqa if m.__self__ is None: return getattr, (m.__self__.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) ForkingPickler.register(type(ForkingPickler.save), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) ForkingPickler.register(type(list.append), _reduce_method_descriptor) ForkingPickler.register(type(int.__add__), _reduce_method_descriptor) try: from functools import partial except ImportError: pass else: def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return partial(func, *args, **keywords) ForkingPickler.register(partial, _reduce_partial) def dump(obj, file, protocol=None): ForkingPickler(file, protocol).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # XXX Should this subprocess import be here? import _subprocess # noqa def send_handle(conn, handle, destination_pid): from ..forking import duplicate process_handle = win32.OpenProcess( win32.PROCESS_ALL_ACCESS, False, destination_pid ) try: new_handle = duplicate(handle, process_handle) conn.send(new_handle) finally: close(process_handle) def recv_handle(conn): return conn.recv() else: def send_handle(conn, handle, destination_pid): # noqa _billiard.sendfd(conn.fileno(), handle) def recv_handle(conn): # noqa return _billiard.recvfd(conn.fileno()) # # Support for a per-process server thread which caches pickled handles # def _reset(obj): global _lock, _listener, _cache for h in _cache: close(h) _cache.clear() _lock = threading.Lock() _listener = None _reset(None) register_after_fork(_reset, _reset) def _get_listener(): global _listener if _listener is None: _lock.acquire() try: if _listener is None: from ..connection import Listener debug('starting listener and thread for sending handles') _listener = Listener(authkey=current_process().authkey) t = threading.Thread(target=_serve) t.daemon = True t.start() finally: _lock.release() return _listener def _serve(): from ..util import is_exiting, sub_warning while 1: try: conn = _listener.accept() handle_wanted, destination_pid = conn.recv() _cache.remove(handle_wanted) send_handle(conn, handle_wanted, destination_pid) close(handle_wanted) conn.close() except: if not is_exiting(): sub_warning('thread for sharing handles raised exception', exc_info=True) # # Functions to be used for pickling/unpickling objects with handles # def reduce_handle(handle): from ..forking import Popen, duplicate if Popen.thread_is_spawning(): return (None, Popen.duplicate_for_child(handle), True) dup_handle = duplicate(handle) _cache.add(dup_handle) sub_debug('reducing handle %d', handle) return (_get_listener().address, dup_handle, False) def rebuild_handle(pickled_data): from ..connection import Client address, handle, inherited = pickled_data if inherited: return handle sub_debug('rebuilding handle %d', handle) conn = Client(address, authkey=current_process().authkey) conn.send((handle, os.getpid())) new_handle = recv_handle(conn) conn.close() return new_handle # # Register `_billiard.Connection` with `ForkingPickler` # def reduce_connection(conn): rh = reduce_handle(conn.fileno()) return rebuild_connection, (rh, conn.readable, conn.writable) def rebuild_connection(reduced_handle, readable, writable): handle = rebuild_handle(reduced_handle) return _billiard.Connection( handle, readable=readable, writable=writable ) # Register `socket.socket` with `ForkingPickler` # def fromfd(fd, family, type_, proto=0): s = socket.fromfd(fd, family, type_, proto) if s.__class__ is not socket.socket: s = socket.socket(_sock=s) return s def reduce_socket(s): reduced_handle = reduce_handle(s.fileno()) return rebuild_socket, (reduced_handle, s.family, s.type, s.proto) def rebuild_socket(reduced_handle, family, type_, proto): fd = rebuild_handle(reduced_handle) _sock = fromfd(fd, family, type_, proto) close(fd) return _sock ForkingPickler.register(socket.socket, reduce_socket) # # Register `_billiard.PipeConnection` with `ForkingPickler` # if sys.platform == 'win32': def reduce_pipe_connection(conn): rh = reduce_handle(conn.fileno()) return rebuild_pipe_connection, (rh, conn.readable, conn.writable) def rebuild_pipe_connection(reduced_handle, readable, writable): handle = rebuild_handle(reduced_handle) return _billiard.PipeConnection( handle, readable=readable, writable=writable ) billiard-3.3.0.15/billiard/queues.py0000644000076500000000000002630212270022117017575 0ustar asksolwheel00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] import sys import os import threading import collections import weakref import errno from . import Pipe from ._ext import _billiard from .compat import get_errno from .five import monotonic from .synchronize import Lock, BoundedSemaphore, Semaphore, Condition from .util import debug, error, info, Finalize, register_after_fork from .five import Empty, Full from .forking import assert_spawning class Queue(object): ''' Queue type using a pipe, buffer and thread ''' def __init__(self, maxsize=0): if maxsize <= 0: maxsize = _billiard.SemLock.SEM_VALUE_MAX self._maxsize = maxsize self._reader, self._writer = Pipe(duplex=False) self._rlock = Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = Lock() self._sem = BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._after_fork() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._after_fork() def _after_fork(self): debug('Queue._after_fork()') self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send = self._writer.send self._recv = self._reader.recv self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): assert not self._closed if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if block and timeout is None: with self._rlock: res = self._recv() self._sem.release() return res else: if block: deadline = monotonic() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - monotonic() if timeout < 0 or not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv() self._sem.release() return res finally: self._rlock.release() def qsize(self): # Raises NotImplementedError on Mac OSX because # of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True self._reader.close() if self._close: self._close() def join_thread(self): debug('Queue.join_thread()') assert self._closed if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send, self._wlock, self._writer.close, self._ignore_epipe), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') # On process exit we will wait for data to be flushed to pipe. # # However, if this process created the queue then all # processes which use the queue will be descendants of this # process. Therefore waiting for the queue to be flushed # is pointless once all the child processes have been joined. created_by_this_process = (self._opid == os.getpid()) if not self._joincancelled and not created_by_this_process: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send, writelock, close, ignore_epipe): debug('starting thread to feed data to pipe') from .util import is_exiting ncond = notempty nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wlock = writelock else: wlock = None try: while 1: with ncond: if not buffer: nwait() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') close() return if wlock is None: send(obj) else: with wlock: send(obj) except IndexError: pass except Exception as exc: if ignore_epipe and get_errno(exc) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. try: if is_exiting(): info('error in queue thread: %r', exc, exc_info=True) else: if not error('error in queue thread: %r', exc, exc_info=True): import traceback traceback.print_exc() except Exception: pass _sentinel = object() class JoinableQueue(Queue): ''' A queue type which also supports join() and task_done() methods Note that if you do not call task_done() for each finished task then eventually the counter's semaphore may overflow causing Bad Things to happen. ''' def __init__(self, maxsize=0): Queue.__init__(self, maxsize) self._unfinished_tasks = Semaphore(0) self._cond = Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): assert not self._closed if not self._sem.acquire(block, timeout): raise Full with self._notempty: with self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() class _SimpleQueue(object): ''' Simplified Queue type -- really just a locked pipe ''' def __init__(self, rnonblock=False, wnonblock=False): self._reader, self._writer = Pipe( duplex=False, rnonblock=rnonblock, wnonblock=wnonblock, ) self._poll = self._reader.poll self._rlock = self._wlock = None self._make_methods() def empty(self): return not self._poll() def __getstate__(self): assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state self._make_methods() def _make_methods(self): recv = self._reader.recv try: recv_payload = self._reader.recv_payload except AttributeError: recv_payload = self._reader.recv_bytes rlock = self._rlock if rlock is not None: def get(): with rlock: return recv() self.get = get def get_payload(): with rlock: return recv_payload() self.get_payload = get_payload else: self.get = recv self.get_payload = recv_payload if self._wlock is None: # writes to a message oriented win32 pipe are atomic self.put = self._writer.send else: send = self._writer.send wlock = self._wlock def put(obj): with wlock: return send(obj) self.put = put class SimpleQueue(_SimpleQueue): def __init__(self): self._reader, self._writer = Pipe(duplex=False) self._rlock = Lock() self._wlock = Lock() if sys.platform != 'win32' else None self._make_methods() billiard-3.3.0.15/billiard/reduction.py0000644000076500000000000000030012271741057020264 0ustar asksolwheel00000000000000from __future__ import absolute_import import sys if sys.version_info[0] == 3: from .py3 import reduction else: from .py2 import reduction # noqa sys.modules[__name__] = reduction billiard-3.3.0.15/billiard/sharedctypes.py0000644000076500000000000001431412270022117020764 0ustar asksolwheel00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import ctypes import weakref from . import heap, RLock from .five import int_types from .forking import assert_spawning from .reduction import ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'f': ctypes.c_float, 'd': ctypes.c_double } def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int_types): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, **kwds): ''' Return a synchronization wrapper for a Value ''' lock = kwds.pop('lock', None) if kwds: raise ValueError( 'unrecognized keyword argument(s): %s' % list(kwds.keys())) obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): lock = RLock() if not hasattr(lock, 'acquire'): raise AttributeError("'%r' has no method 'acquire'" % lock) return synchronized(obj, lock) def Array(typecode_or_type, size_or_initializer, **kwds): ''' Return a synchronization wrapper for a RawArray ''' lock = kwds.pop('lock', None) if kwds: raise ValueError( 'unrecognized keyword argument(s): %s' % list(kwds.keys())) obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): lock = RLock() if not hasattr(lock, 'acquire'): raise AttributeError("'%r' has no method 'acquire'" % lock) return synchronized(obj, lock) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def synchronized(obj, lock=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock) return SynchronizedArray(obj, lock) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = dict((name, make_property(name)) for name in names) classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length ForkingPickler.register(type_, reduce_ctype) obj = type_.from_address(wrapper.get_address()) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec(template % ((name, ) * 7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None): self._obj = obj self._lock = lock or RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): self.acquire() try: return self._obj[i] finally: self.release() def __setitem__(self, i, value): self.acquire() try: self._obj[i] = value finally: self.release() def __getslice__(self, start, stop): self.acquire() try: return self._obj[start:stop] finally: self.release() def __setslice__(self, start, stop, values): self.acquire() try: self._obj[start:stop] = values finally: self.release() class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') billiard-3.3.0.15/billiard/synchronize.py0000644000076500000000000003217312270022117020644 0ustar asksolwheel00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', ] import itertools import os import signal import sys import threading from ._ext import _billiard, ensure_SemLock from .five import range, monotonic from .process import current_process from .util import Finalize, register_after_fork, debug from .forking import assert_spawning, Popen from .compat import bytes, closerange # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. # See issue 3770 ensure_SemLock() # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _billiard.SemLock.SEM_VALUE_MAX try: sem_unlink = _billiard.SemLock.sem_unlink except AttributeError: sem_unlink = None # # Base class for semaphores and mutexes; wraps `_billiard.SemLock` # def _semname(sl): try: return sl.name except AttributeError: pass class SemLock(object): _counter = itertools.count() def __init__(self, kind, value, maxvalue): from .forking import _forking_is_enabled unlink_immediately = _forking_is_enabled or sys.platform == 'win32' if sem_unlink: sl = self._semlock = _billiard.SemLock( kind, value, maxvalue, self._make_name(), unlink_immediately) else: sl = self._semlock = _billiard.SemLock(kind, value, maxvalue) debug('created semlock with handle %s', sl.handle) self._make_methods() if sem_unlink: if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() register_after_fork(self, _after_fork) if _semname(self._semlock) is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name Finalize(self, sem_unlink, (self._semlock.name,), exitpriority=0) # In case of abnormal termination unlink semaphore name _cleanup_semaphore_if_leaked(self._semlock.name) def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): assert_spawning(self) sl = self._semlock state = (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue) try: state += (sl.name, ) except AttributeError: pass return state def __setstate__(self, state): self._semlock = _billiard.SemLock._rebuild(*state) debug('recreated blocker with handle %r', state[0]) self._make_methods() @staticmethod def _make_name(): return '/%s-%s-%s' % (current_process()._semprefix, os.getpid(), next(SemLock._counter)) class Semaphore(SemLock): def __init__(self, value=1): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '' % value class BoundedSemaphore(Semaphore): def __init__(self, value=1): SemLock.__init__(self, SEMAPHORE, value, value) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '' % \ (value, self._semlock.maxvalue) class Lock(SemLock): ''' Non-recursive lock. ''' def __init__(self): SemLock.__init__(self, SEMAPHORE, 1, 1) def __repr__(self): try: if self._semlock._is_mine(): name = current_process().name if threading.currentThread().name != 'MainThread': name += '|' + threading.currentThread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '' % name class RLock(SemLock): ''' Recursive lock ''' def __init__(self): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1) def __repr__(self): try: if self._semlock._is_mine(): name = current_process().name if threading.currentThread().name != 'MainThread': name += '|' + threading.currentThread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '' % (name, count) class Condition(object): ''' Condition variable ''' def __init__(self, lock=None): self._lock = lock or RLock() self._sleeping_count = Semaphore(0) self._woken_count = Semaphore(0) self._wait_semaphore = Semaphore(0) self._make_methods() def __getstate__(self): assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unkown' return '' % (self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout ret = self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() return ret def notify(self): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire(False) # to take account of timeouts since last notify() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res if self._sleeping_count.acquire(False): # try grabbing a sleeper self._wait_semaphore.release() # wake up one sleeper self._woken_count.acquire() # wait for sleeper to wake # rezero _wait_semaphore in case a timeout just happened self._wait_semaphore.acquire(False) def notify_all(self): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire(False) # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res sleepers = 0 while self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = monotonic() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - monotonic() if waittime <= 0: break self.wait(waittime) result = predicate() return result class Event(object): def __init__(self): self._cond = Condition(Lock()) self._flag = Semaphore(0) def is_set(self): self._cond.acquire() try: if self._flag.acquire(False): self._flag.release() return True return False finally: self._cond.release() def set(self): self._cond.acquire() try: self._flag.acquire(False) self._flag.release() self._cond.notify_all() finally: self._cond.release() def clear(self): self._cond.acquire() try: self._flag.acquire(False) finally: self._cond.release() def wait(self, timeout=None): self._cond.acquire() try: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False finally: self._cond.release() if sys.platform != 'win32': # # Protection against unlinked semaphores if the program ends abnormally # and forking has been disabled. # def _cleanup_semaphore_if_leaked(name): name = name.encode('ascii') + bytes('\0', 'ascii') if len(name) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('name too long') fd = _get_unlinkfd() bits = os.write(fd, name) assert bits == len(name) def _get_unlinkfd(): cp = current_process() if cp._unlinkfd is None: r, w = os.pipe() pid = os.fork() if pid == 0: try: from setproctitle import setproctitle setproctitle("[sem_cleanup for %r]" % cp.pid) except: pass # Fork a process which will survive until all other processes # which have a copy of the write end of the pipe have exited. # The forked process just collects names of semaphores until # EOF is indicated. Then it tries unlinking all the names it # has collected. _collect_names_then_unlink(r) os._exit(0) os.close(r) cp._unlinkfd = w return cp._unlinkfd def _collect_names_then_unlink(r): # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) # close all fds except r try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 closerange(0, r) closerange(r + 1, MAXFD) # collect data written to pipe data = [] while 1: try: s = os.read(r, 512) except: # XXX IO lock might be held at fork, so don't try # printing unexpected exception - see issue 6721 pass else: if not s: break data.append(s) # attempt to unlink each collected name for name in bytes('', 'ascii').join(data).split(bytes('\0', 'ascii')): try: sem_unlink(name.decode('ascii')) except: # XXX IO lock might be held at fork, so don't try # printing unexpected exception - see issue 6721 pass billiard-3.3.0.15/billiard/tests/0000755000076500000000000000000012276217622017070 5ustar asksolwheel00000000000000billiard-3.3.0.15/billiard/tests/__init__.py0000644000076500000000000000103012270022117021156 0ustar asksolwheel00000000000000from __future__ import absolute_import import atexit def teardown(): # Workaround for multiprocessing bug where logging # is attempted after global already collected at shutdown. cancelled = set() try: import multiprocessing.util cancelled.add(multiprocessing.util._exit_function) except (AttributeError, ImportError): pass try: atexit._exithandlers[:] = [ e for e in atexit._exithandlers if e[0] not in cancelled ] except AttributeError: pass billiard-3.3.0.15/billiard/tests/compat.py0000644000076500000000000000565412270022117020722 0ustar asksolwheel00000000000000from __future__ import absolute_import import sys class WarningMessage(object): """Holds the result of a single showwarning() call.""" _WARNING_DETAILS = ('message', 'category', 'filename', 'lineno', 'file', 'line') def __init__(self, message, category, filename, lineno, file=None, line=None): local_values = locals() for attr in self._WARNING_DETAILS: setattr(self, attr, local_values[attr]) self._category_name = category and category.__name__ or None def __str__(self): return ('{message : %r, category : %r, filename : %r, lineno : %s, ' 'line : %r}' % (self.message, self._category_name, self.filename, self.lineno, self.line)) class catch_warnings(object): """A context manager that copies and restores the warnings filter upon exiting the context. The 'record' argument specifies whether warnings should be captured by a custom implementation of warnings.showwarning() and be appended to a list returned by the context manager. Otherwise None is returned by the context manager. The objects appended to the list are arguments whose attributes mirror the arguments to showwarning(). The 'module' argument is to specify an alternative module to the module named 'warnings' and imported under that name. This argument is only useful when testing the warnings module itself. """ def __init__(self, record=False, module=None): """Specify whether to record warnings and if an alternative module should be used other than sys.modules['warnings']. For compatibility with Python 3.0, please consider all arguments to be keyword-only. """ self._record = record self._module = module is None and sys.modules['warnings'] or module self._entered = False def __repr__(self): args = [] if self._record: args.append('record=True') if self._module is not sys.modules['warnings']: args.append('module=%r' % self._module) name = type(self).__name__ return '%s(%s)' % (name, ', '.join(args)) def __enter__(self): if self._entered: raise RuntimeError('Cannot enter %r twice' % self) self._entered = True self._filters = self._module.filters self._module.filters = self._filters[:] self._showwarning = self._module.showwarning if self._record: log = [] def showwarning(*args, **kwargs): log.append(WarningMessage(*args, **kwargs)) self._module.showwarning = showwarning return log def __exit__(self, *exc_info): if not self._entered: raise RuntimeError('Cannot exit %r without entering first' % self) self._module.filters = self._filters self._module.showwarning = self._showwarning billiard-3.3.0.15/billiard/tests/test_common.py0000644000076500000000000000611412270022117021756 0ustar asksolwheel00000000000000from __future__ import absolute_import import os import signal from contextlib import contextmanager from mock import call, patch, Mock from time import time from billiard.common import ( _shutdown_cleanup, reset_signals, restart_state, ) from .utils import Case def signo(name): return getattr(signal, name) @contextmanager def termsigs(default, full): from billiard import common prev_def, common.TERMSIGS_DEFAULT = common.TERMSIGS_DEFAULT, default prev_full, common.TERMSIGS_FULL = common.TERMSIGS_FULL, full try: yield finally: common.TERMSIGS_DEFAULT, common.TERMSIGS_FULL = prev_def, prev_full class test_reset_signals(Case): def test_shutdown_handler(self): with patch('sys.exit') as exit: _shutdown_cleanup(15, Mock()) self.assertTrue(exit.called) self.assertEqual(os.WTERMSIG(exit.call_args[0][0]), 15) def test_does_not_reset_ignored_signal(self, sigs=['SIGTERM']): with self.assert_context(sigs, [], signal.SIG_IGN) as (_, SET): self.assertFalse(SET.called) def test_does_not_reset_if_current_is_None(self, sigs=['SIGTERM']): with self.assert_context(sigs, [], None) as (_, SET): self.assertFalse(SET.called) def test_resets_for_SIG_DFL(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']): with self.assert_context(sigs, [], signal.SIG_DFL) as (_, SET): SET.assert_has_calls([ call(signo(sig), _shutdown_cleanup) for sig in sigs ]) def test_resets_for_obj(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']): with self.assert_context(sigs, [], object()) as (_, SET): SET.assert_has_calls([ call(signo(sig), _shutdown_cleanup) for sig in sigs ]) def test_handles_errors(self, sigs=['SIGTERM']): for exc in (OSError(), AttributeError(), ValueError(), RuntimeError()): with self.assert_context(sigs, [], signal.SIG_DFL, exc) as (_, SET): self.assertTrue(SET.called) @contextmanager def assert_context(self, default, full, get_returns=None, set_effect=None): with termsigs(default, full): with patch('signal.getsignal') as GET: with patch('signal.signal') as SET: GET.return_value = get_returns SET.side_effect = set_effect reset_signals() GET.assert_has_calls([ call(signo(sig)) for sig in default ]) yield GET, SET class test_restart_state(Case): def test_raises(self): s = restart_state(100, 1) # max 100 restarts in 1 second. s.R = 99 s.step() with self.assertRaises(s.RestartFreqExceeded): s.step() def test_time_passed_resets_counter(self): s = restart_state(100, 10) s.R, s.T = 100, time() with self.assertRaises(s.RestartFreqExceeded): s.step() s.R, s.T = 100, time() s.step(time() + 20) self.assertEqual(s.R, 1) billiard-3.3.0.15/billiard/tests/test_package.py0000644000076500000000000000036612270022117022064 0ustar asksolwheel00000000000000from __future__ import absolute_import import billiard from .utils import Case class test_billiard(Case): def test_has_version(self): self.assertTrue(billiard.__version__) self.assertIsInstance(billiard.__version__, str) billiard-3.3.0.15/billiard/tests/utils.py0000644000076500000000000001171512270022117020572 0ustar asksolwheel00000000000000from __future__ import absolute_import import re import sys import warnings try: import unittest # noqa unittest.skip from unittest.util import safe_repr, unorderable_list_difference except AttributeError: import unittest2 as unittest # noqa from unittest2.util import safe_repr, unorderable_list_difference # noqa from billiard.five import string_t, items, values from .compat import catch_warnings # -- adds assertWarns from recent unittest2, not in Python 2.7. class _AssertRaisesBaseContext(object): def __init__(self, expected, test_case, callable_obj=None, expected_regex=None): self.expected = expected self.failureException = test_case.failureException self.obj_name = None if isinstance(expected_regex, string_t): expected_regex = re.compile(expected_regex) self.expected_regex = expected_regex class _AssertWarnsContext(_AssertRaisesBaseContext): """A context manager used to implement TestCase.assertWarns* methods.""" def __enter__(self): # The __warningregistry__'s need to be in a pristine state for tests # to work properly. warnings.resetwarnings() for v in values(sys.modules): if getattr(v, '__warningregistry__', None): v.__warningregistry__ = {} self.warnings_manager = catch_warnings(record=True) self.warnings = self.warnings_manager.__enter__() warnings.simplefilter('always', self.expected) return self def __exit__(self, exc_type, exc_value, tb): self.warnings_manager.__exit__(exc_type, exc_value, tb) if exc_type is not None: # let unexpected exceptions pass through return try: exc_name = self.expected.__name__ except AttributeError: exc_name = str(self.expected) first_matching = None for m in self.warnings: w = m.message if not isinstance(w, self.expected): continue if first_matching is None: first_matching = w if (self.expected_regex is not None and not self.expected_regex.search(str(w))): continue # store warning for later retrieval self.warning = w self.filename = m.filename self.lineno = m.lineno return # Now we simply try to choose a helpful failure message if first_matching is not None: raise self.failureException( '%r does not match %r' % ( self.expected_regex.pattern, str(first_matching))) if self.obj_name: raise self.failureException( '%s not triggered by %s' % (exc_name, self.obj_name)) else: raise self.failureException('%s not triggered' % exc_name) class Case(unittest.TestCase): def assertWarns(self, expected_warning): return _AssertWarnsContext(expected_warning, self, None) def assertWarnsRegex(self, expected_warning, expected_regex): return _AssertWarnsContext(expected_warning, self, None, expected_regex) def assertDictContainsSubset(self, expected, actual, msg=None): missing, mismatched = [], [] for key, value in items(expected): if key not in actual: missing.append(key) elif value != actual[key]: mismatched.append('%s, expected: %s, actual: %s' % ( safe_repr(key), safe_repr(value), safe_repr(actual[key]))) if not (missing or mismatched): return standard_msg = '' if missing: standard_msg = 'Missing: %s' % ','.join(map(safe_repr, missing)) if mismatched: if standard_msg: standard_msg += '; ' standard_msg += 'Mismatched values: %s' % ( ','.join(mismatched)) self.fail(self._formatMessage(msg, standard_msg)) def assertItemsEqual(self, expected_seq, actual_seq, msg=None): missing = unexpected = None try: expected = sorted(expected_seq) actual = sorted(actual_seq) except TypeError: # Unsortable items (example: set(), complex(), ...) expected = list(expected_seq) actual = list(actual_seq) missing, unexpected = unorderable_list_difference( expected, actual) else: return self.assertSequenceEqual(expected, actual, msg=msg) errors = [] if missing: errors.append( 'Expected, but missing:\n %s' % (safe_repr(missing), ), ) if unexpected: errors.append( 'Unexpected, but present:\n %s' % (safe_repr(unexpected), ), ) if errors: standardMsg = '\n'.join(errors) self.fail(self._formatMessage(msg, standardMsg)) billiard-3.3.0.15/billiard/util.py0000644000076500000000000000645212270022117017247 0ustar asksolwheel00000000000000# # Module providing various facilities to other parts of the package # # billiard/util.py # # Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import errno import functools import atexit from multiprocessing.util import ( # noqa _afterfork_registry, _afterfork_counter, _exit_function, _finalizer_registry, _finalizer_counter, Finalize, ForkAwareLocal, ForkAwareThreadLock, get_temp_dir, is_exiting, register_after_fork, _run_after_forkers, _run_finalizers, ) from .compat import get_errno __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 ERROR = 40 LOGGER_NAME = 'multiprocessing' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args, **kwargs): if _logger: _logger.log(SUBDEBUG, msg, *args, **kwargs) def debug(msg, *args, **kwargs): if _logger: _logger.log(DEBUG, msg, *args, **kwargs) return True return False def info(msg, *args, **kwargs): if _logger: _logger.log(INFO, msg, *args, **kwargs) return True return False def sub_warning(msg, *args, **kwargs): if _logger: _logger.log(SUBWARNING, msg, *args, **kwargs) return True return False def error(msg, *args, **kwargs): if _logger: _logger.log(ERROR, msg, *args, **kwargs) return True return False def get_logger(): ''' Returns logger used by multiprocessing ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 logging.addLevelName(SUBDEBUG, 'SUBDEBUG') logging.addLevelName(SUBWARNING, 'SUBWARNING') # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger def _eintr_retry(func): ''' Automatic retry after EINTR. ''' @functools.wraps(func) def wrapped(*args, **kwargs): while 1: try: return func(*args, **kwargs) except OSError as exc: if get_errno(exc) != errno.EINTR: raise return wrapped billiard-3.3.0.15/billiard.egg-info/0000755000076500000000000000000012276217622017420 5ustar asksolwheel00000000000000billiard-3.3.0.15/billiard.egg-info/dependency_links.txt0000644000076500000000000000000112276217616023471 0ustar asksolwheel00000000000000 billiard-3.3.0.15/billiard.egg-info/not-zip-safe0000644000076500000000000000000112276217616021651 0ustar asksolwheel00000000000000 billiard-3.3.0.15/billiard.egg-info/PKG-INFO0000644000076500000000000006107012276217616020524 0ustar asksolwheel00000000000000Metadata-Version: 1.1 Name: billiard Version: 3.3.0.15 Summary: Python multiprocessing fork with improvements and bugfixes Home-page: http://github.com/celery/billiard Author: Ask Solem', Author-email: ask@celeryproject.org License: BSD Description: ======== billiard ======== :version: 3.3.0.15 About ----- `billiard` is a fork of the Python 2.7 `multiprocessing `_ package. The multiprocessing package itself is a renamed and updated version of R Oudkerk's `pyprocessing `_ package. This standalone variant is intended to be compatible with Python 2.4 and 2.5, and will draw it's fixes/improvements from python-trunk. - This package would not be possible if not for the contributions of not only the current maintainers but all of the contributors to the original pyprocessing package listed `here `_ - Also it is a fork of the multiprocessin backport package by Christian Heims. - It includes the no-execv patch contributed by R. Oudkerk. - And the Pool improvements previously located in `Celery`_. .. _`Celery`: http://celeryproject.org Bug reporting ------------- Please report bugs related to multiprocessing at the `Python bug tracker `_. Issues related to billiard should be reported at http://github.com/celery/billiard/issues. .. image:: https://d2weczhvl823v0.cloudfront.net/celery/billiard/trend.png :alt: Bitdeli badge :target: https://bitdeli.com/free =========== Changes =========== 3.3.0.15 - 2014-02-10 --------------------- - Pool: Fixed "cannot join process not started" error. - Now uses billiard.py2 and billiard.py3 specific packages that are installed depending on the python version used. This way the installation will not import version specific modules (and possibly crash). 3.3.0.14 - 2014-01-17 --------------------- - Fixed problem with our backwards compatible ``bytes`` wrapper (Issue #103). - No longer expects frozen applications to have a valid ``__file__`` attribute. Fix contributed by George Sibble. 3.3.0.13 - 2013-12-13 --------------------- - Fixes compatability with Python < 2.7.6 - No longer attempts to handle ``SIGBUS`` Contributed by Vishal Vatsa. - Non-thread based pool now only handles signals: ``SIGHUP``, ``SIGQUIT``, ``SIGTERM``, ``SIGUSR1``, ``SIGUSR2``. - setup.py: Only show compilation warning for build related commands. 3.3.0.12 - 2013-12-09 --------------------- - Fixed installation for Python 3. Contributed by Rickert Mulder. - Pool: Fixed bug with maxtasksperchild. Fix contributed by Ionel Cristian Maries. - Pool: Fixed bug in maintain_pool. 3.3.0.11 - 2013-12-03 --------------------- - Fixed Unicode error when installing the distribution (Issue #89). - Daemonic processes are now allowed to have children. But note that it will not be possible to automatically terminate them when the process exits. See discussion at https://github.com/celery/celery/issues/1709 - Pool: Would not always be able to detect that a process exited. 3.3.0.10 - 2013-12-02 --------------------- - Windows: Fixed problem with missing ``WAITABANDONED_0`` Fix contributed by Matthias Wagner - Windows: PipeConnection can now be inherited. Fix contributed by Matthias Wagner 3.3.0.9 - 2013-12-02 -------------------- - Temporary workaround for Celery maxtasksperchild issue. Fix contributed by Ionel Cristian Maries. 3.3.0.8 - 2013-11-21 -------------------- - Now also sets ``multiprocessing.current_process`` for compatibility with loggings ``processName`` field. 3.3.0.7 - 2013-11-15 -------------------- - Fixed compatibility with PyPy 2.1 + 2.2. - Fixed problem in pypy detection. Fix contributed by Tin Tvrtkovic. - Now uses ``ctypes.find_library`` instead of hardcoded path to find the OS X CoreServices framework. Fix contributed by Moritz Kassner. 3.3.0.6 - 2013-11-12 -------------------- - Now works without C extension again. - New ``_billiard.read(fd, buffer, [len, ])` function implements os.read with buffer support (new buffer API) - New pure-python implementation of ``Connection.send_offset``. 3.3.0.5 - 2013-11-11 -------------------- - All platforms except for Windows/PyPy/Jython now requires the C extension. 3.3.0.4 - 2013-11-11 -------------------- - Fixed problem with Python3 and setblocking. 3.3.0.3 - 2013-11-09 -------------------- - Now works on Windows again. 3.3.0.2 - 2013-11-08 -------------------- - ApplyResult.terminate() may be set to signify that the job must not be executed. It can be used in combination with Pool.terminate_job. - Pipe/_SimpleQueue: Now supports rnonblock/wnonblock arguments to set the read or write end of the pipe to be nonblocking. - Pool: Log message included exception info but exception happened in another process so the resulting traceback was wrong. - Pool: Worker process can now prepare results before they are sent back to the main process (using ``Worker.prepare_result``). 3.3.0.1 - 2013-11-04 -------------------- - Pool: New ``correlation_id`` argument to ``apply_async`` can be used to set a related id for the ``ApplyResult`` object returned: >>> r = pool.apply_async(target, args, kwargs, correlation_id='foo') >>> r.correlation_id 'foo' - Pool: New callback `on_process_exit` is called when a pool process exits, with signature ``(pid, exitcode)``. Contributed by Daniel M. Taub. - Pool: Improved the too many restarts detection. 3.3.0.0 - 2013-10-14 -------------------- - Dual code base now runs on Python 2.6+ and Python 3. - No longer compatible with Python 2.5 - Includes many changes from multiprocessing in 3.4. - Now uses ``time.monotonic`` when available, also including fallback implementations for Linux and OS X. - No longer cleans up after receiving SIGILL, SIGSEGV or SIGFPE Contributed by Kevin Blackham - ``Finalize`` and ``register_after_fork`` is now aliases to multiprocessing. It's better to import these from multiprocessing directly now so that there aren't multiple registries. - New `billiard.queues._SimpleQueue` that does not use semaphores. - Pool: Can now be extended to support using multiple IPC queues. - Pool: Can now use async I/O to write to pool IPC queues. - Pool: New ``Worker.on_loop_stop`` handler can be used to add actions at pool worker process shutdown. Note that, like all finalization handlers, there is no guarantee that this will be executed. Contributed by dmtaub. 2.7.3.30 - 2013-06-28 --------------------- - Fixed ImportError in billiard._ext 2.7.3.29 - 2013-06-28 --------------------- - Compilation: Fixed improper handling of HAVE_SEM_OPEN (Issue #55) Fix contributed by Krzysztof Jagiello. - Process now releases logging locks after fork. This previously happened in Pool, but it was done too late as processes logs when they bootstrap. - Pool.terminate_job now ignores `No such process` errors. - billiard.Pool entrypoint did not support new arguments to billiard.pool.Pool - Connection inbound buffer size increased from 1kb to 128kb. - C extension cleaned up by properly adding a namespace to symbols. - _exit_function now works even if thread wakes up after gc collect. 2.7.3.28 - 2013-04-16 --------------------- - Pool: Fixed regression that disabled the deadlock fix in 2.7.3.24 - Pool: RestartFreqExceeded could be raised prematurely. - Process: Include pid in startup and process INFO logs. 2.7.3.27 - 2013-04-12 --------------------- - Manager now works again. - Python 3 fixes for billiard.connection. - Fixed invalid argument bug when running on Python 3.3 Fix contributed by Nathan Wan. - Ignore OSError when setting up signal handlers. 2.7.3.26 - 2013-04-09 --------------------- - Pool: Child processes must ignore SIGINT. 2.7.3.25 - 2013-04-09 --------------------- - Pool: 2.7.3.24 broke support for subprocesses (Issue #48). Signals that should be ignored were instead handled by terminating. 2.7.3.24 - 2013-04-08 --------------------- - Pool: Make sure finally blocks are called when process exits due to a signal. This fixes a deadlock problem when the process is killed while having acquired the shared semaphore. However, this solution does not protect against the processes being killed, a more elaborate solution is required for that. Hopefully this will be fixed soon in a later version. - Pool: Can now use GDB to debug pool child processes. - Fixes Python 3 compatibility problems. Contributed by Albertas Agejevas. 2.7.3.23 - 2013-03-22 --------------------- - Windows: Now catches SystemExit from setuptools while trying to build the C extension (Issue #41). 2.7.3.22 - 2013-03-08 --------------------- - Pool: apply_async now supports a ``callbacks_propagate`` keyword argument that can be a tuple of exceptions to propagate in callbacks. (callback, errback, accept_callback, timeout_callback). - Errors are no longer logged for OK and recycle exit codes. This would cause normal maxtasksperchild recycled process to log an error. - Fixed Python 2.5 compatibility problem (Issue #33). - FreeBSD: Compilation now disables semaphores if Python was built without it (Issue #40). Contributed by William Grzybowski 2.7.3.21 - 2013-02-11 --------------------- - Fixed typo EX_REUSE -> EX_RECYCLE - Code now conforms to new pep8.py rules. 2.7.3.20 - 2013-02-08 --------------------- - Pool: Disable restart limit if maxR is not set. - Pool: Now uses os.kill instead of signal.signal. Contributed by Lukasz Langa - Fixed name error in process.py - Pool: ApplyResult.get now properly raises exceptions. Fix contributed by xentac. 2.7.3.19 - 2012-11-30 --------------------- - Fixes problem at shutdown when gc has collected symbols. - Pool now always uses _kill for Py2.5 compatibility on Windows (Issue #32). - Fixes Python 3 compatibility issues 2.7.3.18 - 2012-11-05 --------------------- - [Pool] Fix for check_timeouts if not set. Fix contributed by Dmitry Sukhov - Fixed pickle problem with Traceback. Code.frame.__loader__ is now ignored as it may be set to an unpickleable object. - The Django old-layout warning was always showing. 2.7.3.17 - 2012-09-26 --------------------- - Fixes typo 2.7.3.16 - 2012-09-26 --------------------- - Windows: Fixes for SemLock._rebuild (Issue #24). - Pool: Job terminated with terminate_job now raises billiard.exceptions.Terminated. 2.7.3.15 - 2012-09-21 --------------------- - Windows: Fixes unpickling of SemLock when using fallback. - Windows: Fixes installation when no C compiler. 2.7.3.14 - 2012-09-20 --------------------- - Installation now works again for Python 3. 2.7.3.13 - 2012-09-14 --------------------- - Merged with Python trunk (many authors, many fixes: see Python changelog in trunk). - Using execv now also works with older Django projects using setup_environ (Issue #10). - Billiard now installs with a warning that the C extension could not be built if a compiler is not installed or the build fails in some other way. It really is recommended to have the C extension installed when running with force execv, but this change also makes it easier to install. - Pool: Hard timeouts now sends KILL shortly after TERM so that C extensions cannot block the signal. Python signal handlers are called in the interpreter, so they cannot be called while a C extension is blocking the interpreter from running. - Now uses a timeout value for Thread.join that doesn't exceed the maximum on some platforms. - Fixed bug in the SemLock fallback used when C extensions not installed. Fix contributed by Mher Movsisyan. - Pool: Now sets a Process.index attribute for every process in the pool. This number will always be between 0 and concurrency-1, and can be used to e.g. create a logfile for each process in the pool without creating a new logfile whenever a process is replaced. 2.7.3.12 - 2012-08-05 --------------------- - Fixed Python 2.5 compatibility issue. - New Pool.terminate_job(pid) to terminate a job without raising WorkerLostError 2.7.3.11 - 2012-08-01 --------------------- - Adds support for FreeBSD 7+ Fix contributed by koobs. - Pool: New argument ``allow_restart`` is now required to enable the pool process sentinel that is required to restart the pool. It's disabled by default, which reduces the number of file descriptors/semaphores required to run the pool. - Pool: Now emits a warning if a worker process exited with error-code. But not if the error code is 155, which is now returned if the worker process was recycled (maxtasksperchild). - Python 3 compatibility fixes. - Python 2.5 compatibility fixes. 2.7.3.10 - 2012-06-26 --------------------- - The ``TimeLimitExceeded`` exception string representation only included the seconds as a number, it now gives a more human friendly description. - Fixed typo in ``LaxBoundedSemaphore.shrink``. - Pool: ``ResultHandler.handle_event`` no longer requires any arguments. - setup.py bdist now works 2.7.3.9 - 2012-06-03 -------------------- - Environment variable ``MP_MAIN_FILE`` envvar is now set to the path of the ``__main__`` module when execv is enabled. - Pool: Errors occurring in the TaskHandler are now reported. 2.7.3.8 - 2012-06-01 -------------------- - Can now be installed on Py 3.2 - Issue #12091: simplify ApplyResult and MapResult with threading.Event Patch by Charles-Francois Natali - Pool: Support running without TimeoutHandler thread. - The with_*_thread arguments has also been replaced with a single `threads=True` argument. - Two new pool callbacks: - ``on_timeout_set(job, soft, hard)`` Applied when a task is executed with a timeout. - ``on_timeout_cancel(job)`` Applied when a timeout is cancelled (the job completed) 2.7.3.7 - 2012-05-21 -------------------- - Fixes Python 2.5 support. 2.7.3.6 - 2012-05-21 -------------------- - Pool: Can now be used in an event loop, without starting the supporting threads (TimeoutHandler still not supported) To facilitate this the pool has gained the following keyword arguments: - ``with_task_thread`` - ``with_result_thread`` - ``with_supervisor_thread`` - ``on_process_up`` Callback called with Process instance as argument whenever a new worker process is added. Used to add new process fds to the eventloop:: def on_process_up(proc): hub.add_reader(proc.sentinel, pool.maintain_pool) - ``on_process_down`` Callback called with Process instance as argument whenever a new worker process is found dead. Used to remove process fds from the eventloop:: def on_process_down(proc): hub.remove(proc.sentinel) - ``semaphore`` Sets the semaphore used to protect from adding new items to the pool when no processes available. The default is a threaded one, so this can be used to change to an async semaphore. And the following attributes:: - ``readers`` A map of ``fd`` -> ``callback``, to be registered in an eventloop. Currently this is only the result outqueue with a callback that processes all currently incoming results. And the following methods:: - ``did_start_ok`` To be called after starting the pool, and after setting up the eventloop with the pool fds, to ensure that the worker processes didn't immediately exit caused by an error (internal/memory). - ``maintain_pool`` Public version of ``_maintain_pool`` that handles max restarts. - Pool: Process too frequent restart protection now only counts if the process had a non-successful exit-code. This to take into account the maxtasksperchild option, and allowing processes to exit cleanly on their own. - Pool: New options max_restart + max_restart_freq This means that the supervisor can't restart processes faster than max_restart' times per max_restart_freq seconds (like the Erlang supervisor maxR & maxT settings). The pool is closed and joined if the max restart frequency is exceeded, where previously it would keep restarting at an unlimited rate, possibly crashing the system. The current default value is to stop if it exceeds 100 * process_count restarts in 1 seconds. This may change later. It will only count processes with an unsuccessful exit code, this is to take into account the ``maxtasksperchild`` setting and code that voluntarily exits. - Pool: The ``WorkerLostError`` message now includes the exit-code of the process that disappeared. 2.7.3.5 - 2012-05-09 -------------------- - Now always cleans up after ``sys.exc_info()`` to avoid cyclic references. - ExceptionInfo without arguments now defaults to ``sys.exc_info``. - Forking can now be disabled using the ``MULTIPROCESSING_FORKING_DISABLE`` environment variable. Also this envvar is set so that the behavior is inherited after execv. - The semaphore cleanup process started when execv is used now sets a useful process name if the ``setproctitle`` module is installed. - Sets the ``FORKED_BY_MULTIPROCESSING`` environment variable if forking is disabled. 2.7.3.4 - 2012-04-27 -------------------- - Added `billiard.ensure_multiprocessing()` Raises NotImplementedError if the platform does not support multiprocessing (e.g. Jython). 2.7.3.3 - 2012-04-23 -------------------- - PyPy now falls back to using its internal _multiprocessing module, so everything works except for forking_enable(False) (which silently degrades). - Fixed Python 2.5 compat. issues. - Uses more with statements - Merged some of the changes from the Python 3 branch. 2.7.3.2 - 2012-04-20 -------------------- - Now installs on PyPy/Jython (but does not work). 2.7.3.1 - 2012-04-20 -------------------- - Python 2.5 support added. 2.7.3.0 - 2012-04-20 -------------------- - Updated from Python 2.7.3 - Python 2.4 support removed, now only supports 2.5, 2.6 and 2.7. (may consider py3k support at some point). - Pool improvements from Celery. - no-execv patch added (http://bugs.python.org/issue8713) Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python Classifier: Programming Language :: C Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.5 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: Jython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: License :: OSI Approved :: BSD License Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: System :: Distributed Computing billiard-3.3.0.15/billiard.egg-info/SOURCES.txt0000644000076500000000000000306612276217616021314 0ustar asksolwheel00000000000000CHANGES.txt INSTALL.txt LICENSE.txt MANIFEST.in Makefile README.rst pavement.py setup.cfg setup.py Doc/conf.py Doc/glossary.rst Doc/index.rst Doc/includes/__init__.py Doc/includes/mp_benchmarks.py Doc/includes/mp_newtype.py Doc/includes/mp_pool.py Doc/includes/mp_synchronize.py Doc/includes/mp_webserver.py Doc/includes/mp_workers.py Doc/library/multiprocessing.rst Modules/_billiard/connection.h Modules/_billiard/multiprocessing.c Modules/_billiard/multiprocessing.h Modules/_billiard/pipe_connection.c Modules/_billiard/semaphore.c Modules/_billiard/socket_connection.c Modules/_billiard/win32_functions.c billiard/__init__.py billiard/_ext.py billiard/_win.py billiard/common.py billiard/compat.py billiard/connection.py billiard/einfo.py billiard/exceptions.py billiard/five.py billiard/forking.py billiard/heap.py billiard/managers.py billiard/pool.py billiard/process.py billiard/queues.py billiard/reduction.py billiard/sharedctypes.py billiard/synchronize.py billiard/util.py billiard.egg-info/PKG-INFO billiard.egg-info/SOURCES.txt billiard.egg-info/dependency_links.txt billiard.egg-info/not-zip-safe billiard.egg-info/top_level.txt billiard/dummy/__init__.py billiard/dummy/connection.py billiard/py2/__init__.py billiard/py2/connection.py billiard/py2/reduction.py billiard/tests/__init__.py billiard/tests/compat.py billiard/tests/test_common.py billiard/tests/test_package.py billiard/tests/utils.py funtests/__init__.py funtests/setup.py funtests/tests/__init__.py funtests/tests/test_multiprocessing.py requirements/test-ci.txt requirements/test.txt requirements/test3.txtbilliard-3.3.0.15/billiard.egg-info/top_level.txt0000644000076500000000000000003412276217616022152 0ustar asksolwheel00000000000000funtests billiard _billiard billiard-3.3.0.15/CHANGES.txt0000644000076500000000000004125512276217454015767 0ustar asksolwheel000000000000003.3.0.15 - 2014-02-10 --------------------- - Pool: Fixed "cannot join process not started" error. - Now uses billiard.py2 and billiard.py3 specific packages that are installed depending on the python version used. This way the installation will not import version specific modules (and possibly crash). 3.3.0.14 - 2014-01-17 --------------------- - Fixed problem with our backwards compatible ``bytes`` wrapper (Issue #103). - No longer expects frozen applications to have a valid ``__file__`` attribute. Fix contributed by George Sibble. 3.3.0.13 - 2013-12-13 --------------------- - Fixes compatability with Python < 2.7.6 - No longer attempts to handle ``SIGBUS`` Contributed by Vishal Vatsa. - Non-thread based pool now only handles signals: ``SIGHUP``, ``SIGQUIT``, ``SIGTERM``, ``SIGUSR1``, ``SIGUSR2``. - setup.py: Only show compilation warning for build related commands. 3.3.0.12 - 2013-12-09 --------------------- - Fixed installation for Python 3. Contributed by Rickert Mulder. - Pool: Fixed bug with maxtasksperchild. Fix contributed by Ionel Cristian Maries. - Pool: Fixed bug in maintain_pool. 3.3.0.11 - 2013-12-03 --------------------- - Fixed Unicode error when installing the distribution (Issue #89). - Daemonic processes are now allowed to have children. But note that it will not be possible to automatically terminate them when the process exits. See discussion at https://github.com/celery/celery/issues/1709 - Pool: Would not always be able to detect that a process exited. 3.3.0.10 - 2013-12-02 --------------------- - Windows: Fixed problem with missing ``WAITABANDONED_0`` Fix contributed by Matthias Wagner - Windows: PipeConnection can now be inherited. Fix contributed by Matthias Wagner 3.3.0.9 - 2013-12-02 -------------------- - Temporary workaround for Celery maxtasksperchild issue. Fix contributed by Ionel Cristian Maries. 3.3.0.8 - 2013-11-21 -------------------- - Now also sets ``multiprocessing.current_process`` for compatibility with loggings ``processName`` field. 3.3.0.7 - 2013-11-15 -------------------- - Fixed compatibility with PyPy 2.1 + 2.2. - Fixed problem in pypy detection. Fix contributed by Tin Tvrtkovic. - Now uses ``ctypes.find_library`` instead of hardcoded path to find the OS X CoreServices framework. Fix contributed by Moritz Kassner. 3.3.0.6 - 2013-11-12 -------------------- - Now works without C extension again. - New ``_billiard.read(fd, buffer, [len, ])` function implements os.read with buffer support (new buffer API) - New pure-python implementation of ``Connection.send_offset``. 3.3.0.5 - 2013-11-11 -------------------- - All platforms except for Windows/PyPy/Jython now requires the C extension. 3.3.0.4 - 2013-11-11 -------------------- - Fixed problem with Python3 and setblocking. 3.3.0.3 - 2013-11-09 -------------------- - Now works on Windows again. 3.3.0.2 - 2013-11-08 -------------------- - ApplyResult.terminate() may be set to signify that the job must not be executed. It can be used in combination with Pool.terminate_job. - Pipe/_SimpleQueue: Now supports rnonblock/wnonblock arguments to set the read or write end of the pipe to be nonblocking. - Pool: Log message included exception info but exception happened in another process so the resulting traceback was wrong. - Pool: Worker process can now prepare results before they are sent back to the main process (using ``Worker.prepare_result``). 3.3.0.1 - 2013-11-04 -------------------- - Pool: New ``correlation_id`` argument to ``apply_async`` can be used to set a related id for the ``ApplyResult`` object returned: >>> r = pool.apply_async(target, args, kwargs, correlation_id='foo') >>> r.correlation_id 'foo' - Pool: New callback `on_process_exit` is called when a pool process exits, with signature ``(pid, exitcode)``. Contributed by Daniel M. Taub. - Pool: Improved the too many restarts detection. 3.3.0.0 - 2013-10-14 -------------------- - Dual code base now runs on Python 2.6+ and Python 3. - No longer compatible with Python 2.5 - Includes many changes from multiprocessing in 3.4. - Now uses ``time.monotonic`` when available, also including fallback implementations for Linux and OS X. - No longer cleans up after receiving SIGILL, SIGSEGV or SIGFPE Contributed by Kevin Blackham - ``Finalize`` and ``register_after_fork`` is now aliases to multiprocessing. It's better to import these from multiprocessing directly now so that there aren't multiple registries. - New `billiard.queues._SimpleQueue` that does not use semaphores. - Pool: Can now be extended to support using multiple IPC queues. - Pool: Can now use async I/O to write to pool IPC queues. - Pool: New ``Worker.on_loop_stop`` handler can be used to add actions at pool worker process shutdown. Note that, like all finalization handlers, there is no guarantee that this will be executed. Contributed by dmtaub. 2.7.3.30 - 2013-06-28 --------------------- - Fixed ImportError in billiard._ext 2.7.3.29 - 2013-06-28 --------------------- - Compilation: Fixed improper handling of HAVE_SEM_OPEN (Issue #55) Fix contributed by Krzysztof Jagiello. - Process now releases logging locks after fork. This previously happened in Pool, but it was done too late as processes logs when they bootstrap. - Pool.terminate_job now ignores `No such process` errors. - billiard.Pool entrypoint did not support new arguments to billiard.pool.Pool - Connection inbound buffer size increased from 1kb to 128kb. - C extension cleaned up by properly adding a namespace to symbols. - _exit_function now works even if thread wakes up after gc collect. 2.7.3.28 - 2013-04-16 --------------------- - Pool: Fixed regression that disabled the deadlock fix in 2.7.3.24 - Pool: RestartFreqExceeded could be raised prematurely. - Process: Include pid in startup and process INFO logs. 2.7.3.27 - 2013-04-12 --------------------- - Manager now works again. - Python 3 fixes for billiard.connection. - Fixed invalid argument bug when running on Python 3.3 Fix contributed by Nathan Wan. - Ignore OSError when setting up signal handlers. 2.7.3.26 - 2013-04-09 --------------------- - Pool: Child processes must ignore SIGINT. 2.7.3.25 - 2013-04-09 --------------------- - Pool: 2.7.3.24 broke support for subprocesses (Issue #48). Signals that should be ignored were instead handled by terminating. 2.7.3.24 - 2013-04-08 --------------------- - Pool: Make sure finally blocks are called when process exits due to a signal. This fixes a deadlock problem when the process is killed while having acquired the shared semaphore. However, this solution does not protect against the processes being killed, a more elaborate solution is required for that. Hopefully this will be fixed soon in a later version. - Pool: Can now use GDB to debug pool child processes. - Fixes Python 3 compatibility problems. Contributed by Albertas Agejevas. 2.7.3.23 - 2013-03-22 --------------------- - Windows: Now catches SystemExit from setuptools while trying to build the C extension (Issue #41). 2.7.3.22 - 2013-03-08 --------------------- - Pool: apply_async now supports a ``callbacks_propagate`` keyword argument that can be a tuple of exceptions to propagate in callbacks. (callback, errback, accept_callback, timeout_callback). - Errors are no longer logged for OK and recycle exit codes. This would cause normal maxtasksperchild recycled process to log an error. - Fixed Python 2.5 compatibility problem (Issue #33). - FreeBSD: Compilation now disables semaphores if Python was built without it (Issue #40). Contributed by William Grzybowski 2.7.3.21 - 2013-02-11 --------------------- - Fixed typo EX_REUSE -> EX_RECYCLE - Code now conforms to new pep8.py rules. 2.7.3.20 - 2013-02-08 --------------------- - Pool: Disable restart limit if maxR is not set. - Pool: Now uses os.kill instead of signal.signal. Contributed by Lukasz Langa - Fixed name error in process.py - Pool: ApplyResult.get now properly raises exceptions. Fix contributed by xentac. 2.7.3.19 - 2012-11-30 --------------------- - Fixes problem at shutdown when gc has collected symbols. - Pool now always uses _kill for Py2.5 compatibility on Windows (Issue #32). - Fixes Python 3 compatibility issues 2.7.3.18 - 2012-11-05 --------------------- - [Pool] Fix for check_timeouts if not set. Fix contributed by Dmitry Sukhov - Fixed pickle problem with Traceback. Code.frame.__loader__ is now ignored as it may be set to an unpickleable object. - The Django old-layout warning was always showing. 2.7.3.17 - 2012-09-26 --------------------- - Fixes typo 2.7.3.16 - 2012-09-26 --------------------- - Windows: Fixes for SemLock._rebuild (Issue #24). - Pool: Job terminated with terminate_job now raises billiard.exceptions.Terminated. 2.7.3.15 - 2012-09-21 --------------------- - Windows: Fixes unpickling of SemLock when using fallback. - Windows: Fixes installation when no C compiler. 2.7.3.14 - 2012-09-20 --------------------- - Installation now works again for Python 3. 2.7.3.13 - 2012-09-14 --------------------- - Merged with Python trunk (many authors, many fixes: see Python changelog in trunk). - Using execv now also works with older Django projects using setup_environ (Issue #10). - Billiard now installs with a warning that the C extension could not be built if a compiler is not installed or the build fails in some other way. It really is recommended to have the C extension installed when running with force execv, but this change also makes it easier to install. - Pool: Hard timeouts now sends KILL shortly after TERM so that C extensions cannot block the signal. Python signal handlers are called in the interpreter, so they cannot be called while a C extension is blocking the interpreter from running. - Now uses a timeout value for Thread.join that doesn't exceed the maximum on some platforms. - Fixed bug in the SemLock fallback used when C extensions not installed. Fix contributed by Mher Movsisyan. - Pool: Now sets a Process.index attribute for every process in the pool. This number will always be between 0 and concurrency-1, and can be used to e.g. create a logfile for each process in the pool without creating a new logfile whenever a process is replaced. 2.7.3.12 - 2012-08-05 --------------------- - Fixed Python 2.5 compatibility issue. - New Pool.terminate_job(pid) to terminate a job without raising WorkerLostError 2.7.3.11 - 2012-08-01 --------------------- - Adds support for FreeBSD 7+ Fix contributed by koobs. - Pool: New argument ``allow_restart`` is now required to enable the pool process sentinel that is required to restart the pool. It's disabled by default, which reduces the number of file descriptors/semaphores required to run the pool. - Pool: Now emits a warning if a worker process exited with error-code. But not if the error code is 155, which is now returned if the worker process was recycled (maxtasksperchild). - Python 3 compatibility fixes. - Python 2.5 compatibility fixes. 2.7.3.10 - 2012-06-26 --------------------- - The ``TimeLimitExceeded`` exception string representation only included the seconds as a number, it now gives a more human friendly description. - Fixed typo in ``LaxBoundedSemaphore.shrink``. - Pool: ``ResultHandler.handle_event`` no longer requires any arguments. - setup.py bdist now works 2.7.3.9 - 2012-06-03 -------------------- - Environment variable ``MP_MAIN_FILE`` envvar is now set to the path of the ``__main__`` module when execv is enabled. - Pool: Errors occurring in the TaskHandler are now reported. 2.7.3.8 - 2012-06-01 -------------------- - Can now be installed on Py 3.2 - Issue #12091: simplify ApplyResult and MapResult with threading.Event Patch by Charles-Francois Natali - Pool: Support running without TimeoutHandler thread. - The with_*_thread arguments has also been replaced with a single `threads=True` argument. - Two new pool callbacks: - ``on_timeout_set(job, soft, hard)`` Applied when a task is executed with a timeout. - ``on_timeout_cancel(job)`` Applied when a timeout is cancelled (the job completed) 2.7.3.7 - 2012-05-21 -------------------- - Fixes Python 2.5 support. 2.7.3.6 - 2012-05-21 -------------------- - Pool: Can now be used in an event loop, without starting the supporting threads (TimeoutHandler still not supported) To facilitate this the pool has gained the following keyword arguments: - ``with_task_thread`` - ``with_result_thread`` - ``with_supervisor_thread`` - ``on_process_up`` Callback called with Process instance as argument whenever a new worker process is added. Used to add new process fds to the eventloop:: def on_process_up(proc): hub.add_reader(proc.sentinel, pool.maintain_pool) - ``on_process_down`` Callback called with Process instance as argument whenever a new worker process is found dead. Used to remove process fds from the eventloop:: def on_process_down(proc): hub.remove(proc.sentinel) - ``semaphore`` Sets the semaphore used to protect from adding new items to the pool when no processes available. The default is a threaded one, so this can be used to change to an async semaphore. And the following attributes:: - ``readers`` A map of ``fd`` -> ``callback``, to be registered in an eventloop. Currently this is only the result outqueue with a callback that processes all currently incoming results. And the following methods:: - ``did_start_ok`` To be called after starting the pool, and after setting up the eventloop with the pool fds, to ensure that the worker processes didn't immediately exit caused by an error (internal/memory). - ``maintain_pool`` Public version of ``_maintain_pool`` that handles max restarts. - Pool: Process too frequent restart protection now only counts if the process had a non-successful exit-code. This to take into account the maxtasksperchild option, and allowing processes to exit cleanly on their own. - Pool: New options max_restart + max_restart_freq This means that the supervisor can't restart processes faster than max_restart' times per max_restart_freq seconds (like the Erlang supervisor maxR & maxT settings). The pool is closed and joined if the max restart frequency is exceeded, where previously it would keep restarting at an unlimited rate, possibly crashing the system. The current default value is to stop if it exceeds 100 * process_count restarts in 1 seconds. This may change later. It will only count processes with an unsuccessful exit code, this is to take into account the ``maxtasksperchild`` setting and code that voluntarily exits. - Pool: The ``WorkerLostError`` message now includes the exit-code of the process that disappeared. 2.7.3.5 - 2012-05-09 -------------------- - Now always cleans up after ``sys.exc_info()`` to avoid cyclic references. - ExceptionInfo without arguments now defaults to ``sys.exc_info``. - Forking can now be disabled using the ``MULTIPROCESSING_FORKING_DISABLE`` environment variable. Also this envvar is set so that the behavior is inherited after execv. - The semaphore cleanup process started when execv is used now sets a useful process name if the ``setproctitle`` module is installed. - Sets the ``FORKED_BY_MULTIPROCESSING`` environment variable if forking is disabled. 2.7.3.4 - 2012-04-27 -------------------- - Added `billiard.ensure_multiprocessing()` Raises NotImplementedError if the platform does not support multiprocessing (e.g. Jython). 2.7.3.3 - 2012-04-23 -------------------- - PyPy now falls back to using its internal _multiprocessing module, so everything works except for forking_enable(False) (which silently degrades). - Fixed Python 2.5 compat. issues. - Uses more with statements - Merged some of the changes from the Python 3 branch. 2.7.3.2 - 2012-04-20 -------------------- - Now installs on PyPy/Jython (but does not work). 2.7.3.1 - 2012-04-20 -------------------- - Python 2.5 support added. 2.7.3.0 - 2012-04-20 -------------------- - Updated from Python 2.7.3 - Python 2.4 support removed, now only supports 2.5, 2.6 and 2.7. (may consider py3k support at some point). - Pool improvements from Celery. - no-execv patch added (http://bugs.python.org/issue8713) billiard-3.3.0.15/Doc/0000755000076500000000000000000012276217622014651 5ustar asksolwheel00000000000000billiard-3.3.0.15/Doc/conf.py0000644000076500000000000001374712270022117016147 0ustar asksolwheel00000000000000# -*- coding: utf-8 -*- # # multiprocessing documentation build configuration file, created by # sphinx-quickstart on Wed Nov 26 12:47:00 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. #sys.path.append(os.path.abspath('.')) # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'multiprocessing' copyright = u'2008, Python Software Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. import os import sys sys.path.insert(0, os.path.join(os.getcwd(), os.pardir)) import billiard # # The short X.Y version. version = billiard.__version__ # The full version, including alpha/beta/rc tags. release = billiard.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'default.css' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as _sources/. #html_copy_source = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'multiprocessingdoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). latex_documents = [ ('index', 'multiprocessing.tex', ur'multiprocessing Documentation', ur'Python Software Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True billiard-3.3.0.15/Doc/glossary.rst0000644000076500000000000000330112270022117017226 0ustar asksolwheel00000000000000.. _glossary: ******** Glossary ******** .. glossary:: bytecode Python source code is compiled into bytecode, the internal representation of a Python program in the interpreter. The bytecode is also cached in ``.pyc`` and ``.pyo`` files so that executing the same file is faster the second time (recompilation from source to bytecode can be avoided). This "intermediate language" is said to run on a :term:`virtual machine` that executes the machine code corresponding to each bytecode. CPython The canonical implementation of the Python programming language. The term "CPython" is used in contexts when necessary to distinguish this implementation from others such as Jython or IronPython. GIL See :term:`global interpreter lock`. global interpreter lock The lock used by Python threads to assure that only one thread executes in the :term:`CPython` :term:`virtual machine` at a time. This simplifies the CPython implementation by assuring that no two processes can access the same memory at the same time. Locking the entire interpreter makes it easier for the interpreter to be multi-threaded, at the expense of much of the parallelism afforded by multi-processor machines. Efforts have been made in the past to create a "free-threaded" interpreter (one which locks shared data at a much finer granularity), but so far none have been successful because performance suffered in the common single-processor case. virtual machine A computer defined entirely in software. Python's virtual machine executes the :term:`bytecode` emitted by the bytecode compiler. billiard-3.3.0.15/Doc/includes/0000755000076500000000000000000012276217622016457 5ustar asksolwheel00000000000000billiard-3.3.0.15/Doc/includes/__init__.py0000644000076500000000000000001312270022117020545 0ustar asksolwheel00000000000000# package billiard-3.3.0.15/Doc/includes/mp_benchmarks.py0000644000076500000000000001263712270022117021636 0ustar asksolwheel00000000000000# # Simple benchmarks for the multiprocessing package # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # import time, sys, multiprocessing, threading, Queue, gc if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in xrange(iterations): q.put(a) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print iterations, 'objects passed through the queue in', elapsed, 'seconds' print 'average number/sec:', iterations/elapsed #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 cond.acquire() cond.notify() cond.release() for i in xrange(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = multiprocessing.Pipe() cond = multiprocessing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = multiprocessing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print iterations, 'objects passed through connection in',elapsed,'seconds' print 'average number/sec:', iterations/elapsed #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in xrange(iterations): a = seq[5] elapsed = _timer()-t print iterations, 'iterations in', elapsed, 'seconds' print 'average number/sec:', iterations/elapsed #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in xrange(iterations): l.acquire() l.release() elapsed = _timer()-t print iterations, 'iterations in', elapsed, 'seconds' print 'average number/sec:', iterations/elapsed #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in xrange(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in xrange(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print iterations * 2, 'waits in', elapsed, 'seconds' print 'average number/sec:', iterations * 2 / elapsed #### def test(): manager = multiprocessing.Manager() gc.disable() print '\n\t######## testing Queue.Queue\n' test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print '\n\t######## testing multiprocessing.Queue\n' test_queuespeed(multiprocessing.Process, multiprocessing.Queue(), multiprocessing.Condition()) print '\n\t######## testing Queue managed by server process\n' test_queuespeed(multiprocessing.Process, manager.Queue(), manager.Condition()) print '\n\t######## testing multiprocessing.Pipe\n' test_pipespeed() print print '\n\t######## testing list\n' test_seqspeed(range(10)) print '\n\t######## testing list managed by server process\n' test_seqspeed(manager.list(range(10))) print '\n\t######## testing Array("i", ..., lock=False)\n' test_seqspeed(multiprocessing.Array('i', range(10), lock=False)) print '\n\t######## testing Array("i", ..., lock=True)\n' test_seqspeed(multiprocessing.Array('i', range(10), lock=True)) print print '\n\t######## testing threading.Lock\n' test_lockspeed(threading.Lock()) print '\n\t######## testing threading.RLock\n' test_lockspeed(threading.RLock()) print '\n\t######## testing multiprocessing.Lock\n' test_lockspeed(multiprocessing.Lock()) print '\n\t######## testing multiprocessing.RLock\n' test_lockspeed(multiprocessing.RLock()) print '\n\t######## testing lock managed by server process\n' test_lockspeed(manager.Lock()) print '\n\t######## testing rlock managed by server process\n' test_lockspeed(manager.RLock()) print print '\n\t######## testing threading.Condition\n' test_conditionspeed(threading.Thread, threading.Condition()) print '\n\t######## testing multiprocessing.Condition\n' test_conditionspeed(multiprocessing.Process, multiprocessing.Condition()) print '\n\t######## testing condition managed by a server process\n' test_conditionspeed(multiprocessing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': multiprocessing.freeze_support() test() billiard-3.3.0.15/Doc/includes/mp_newtype.py0000644000076500000000000000435412270022117021211 0ustar asksolwheel00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # from multiprocessing import freeze_support from multiprocessing.managers import BaseManager, BaseProxy import operator ## class Foo(object): def f(self): print 'you called Foo.f()' def g(self): print 'you called Foo.g()' def _h(self): print 'you called Foo._h()' # A simple generator function def baz(): for i in xrange(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): _exposed_ = ('next', '__next__') def __iter__(self): return self def next(self): return self._callmethod('next') def __next__(self): return self._callmethod('__next__') # Function to return the operator module def get_operator_module(): return operator ## class MyManager(BaseManager): pass # register the Foo class; make `f()` and `g()` accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) # register get_operator_module(); make public functions accessible via proxy MyManager.register('operator', get_operator_module) ## def test(): manager = MyManager() manager.start() print '-' * 20 f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') assert sorted(f1._exposed_) == sorted(['f', 'g']) print '-' * 20 f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') assert sorted(f2._exposed_) == sorted(['g', '_h']) print '-' * 20 it = manager.baz() for i in it: print '<%d>' % i, print print '-' * 20 op = manager.operator() print 'op.add(23, 45) =', op.add(23, 45) print 'op.pow(2, 94) =', op.pow(2, 94) print 'op.getslice(range(10), 2, 6) =', op.getslice(range(10), 2, 6) print 'op.repeat(range(5), 3) =', op.repeat(range(5), 3) print 'op._exposed_ =', op._exposed_ ## if __name__ == '__main__': freeze_support() test() billiard-3.3.0.15/Doc/includes/mp_pool.py0000644000076500000000000001555712270022117020476 0ustar asksolwheel00000000000000# # A test of `multiprocessing.Pool` class # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # import multiprocessing import time import random import sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % ( multiprocessing.current_process().name, func.__name__, args, result ) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print 'cpu_count() = %d\n' % multiprocessing.cpu_count() # # Create pool # PROCESSES = 4 print 'Creating pool with %d processes\n' % PROCESSES pool = multiprocessing.Pool(PROCESSES) print 'pool = %s' % pool print # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print 'Ordered results using pool.apply_async():' for r in results: print '\t', r.get() print print 'Ordered results using pool.imap():' for x in imap_it: print '\t', x print print 'Unordered results using pool.imap_unordered():' for x in imap_unordered_it: print '\t', x print print 'Ordered results using pool.map() --- will block till complete:' for x in pool.map(calculatestar, TASKS): print '\t', x print # # Simple benchmarks # N = 100000 print 'def pow3(x): return x**3' t = time.time() A = map(pow3, xrange(N)) print '\tmap(pow3, xrange(%d)):\n\t\t%s seconds' % \ (N, time.time() - t) t = time.time() B = pool.map(pow3, xrange(N)) print '\tpool.map(pow3, xrange(%d)):\n\t\t%s seconds' % \ (N, time.time() - t) t = time.time() C = list(pool.imap(pow3, xrange(N), chunksize=N//8)) print '\tlist(pool.imap(pow3, xrange(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t) assert A == B == C, (len(A), len(B), len(C)) print L = [None] * 1000000 print 'def noop(x): pass' print 'L = [None] * 1000000' t = time.time() A = map(noop, L) print '\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t) t = time.time() B = pool.map(noop, L) print '\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print '\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t) assert A == B == C, (len(A), len(B), len(C)) print del A, B, C, L # # Test error handling # print 'Testing error handling:' try: print pool.apply(f, (5,)) except ZeroDivisionError: print '\tGot ZeroDivisionError as expected from pool.apply()' else: raise AssertionError, 'expected ZeroDivisionError' try: print pool.map(f, range(10)) except ZeroDivisionError: print '\tGot ZeroDivisionError as expected from pool.map()' else: raise AssertionError, 'expected ZeroDivisionError' try: print list(pool.imap(f, range(10))) except ZeroDivisionError: print '\tGot ZeroDivisionError as expected from list(pool.imap())' else: raise AssertionError, 'expected ZeroDivisionError' it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError, 'expected ZeroDivisionError' assert i == 9 print '\tGot ZeroDivisionError as expected from IMapIterator.next()' print # # Testing timeouts # print 'Testing ApplyResult.get() with timeout:', res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except multiprocessing.TimeoutError: sys.stdout.write('.') print print print 'Testing IMapIterator.next() with timeout:', it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except multiprocessing.TimeoutError: sys.stdout.write('.') print print # # Testing callback # print 'Testing callback:' A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print '\tcallbacks succeeded\n' else: print '\t*** callbacks failed\n\t\t%s != %s\n' % (A, B) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print 'Testing close():' for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print '\tclose() succeeded\n' # # Check terminate() method # print 'Testing terminate():' pool = multiprocessing.Pool(2) DELTA = 0.1 ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print '\tterminate() succeeded\n' # # Check garbage collection # print 'Testing garbage collection:' pool = multiprocessing.Pool(2) DELTA = 0.1 processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)] results = pool = None time.sleep(DELTA * 2) for worker in processes: assert not worker.is_alive() print '\tgarbage collection succeeded\n' if __name__ == '__main__': multiprocessing.freeze_support() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print ' Using processes '.center(79, '-') elif sys.argv[1] == 'threads': print ' Using threads '.center(79, '-') import multiprocessing.dummy as multiprocessing else: print 'Usage:\n\t%s [processes | threads]' % sys.argv[0] raise SystemExit(2) test() billiard-3.3.0.15/Doc/includes/mp_synchronize.py0000644000076500000000000001401212270022117022061 0ustar asksolwheel00000000000000# # A test file for the `multiprocessing` package # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # import time, sys, random from Queue import Empty import multiprocessing # may get overwritten #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print '\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished' running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = multiprocessing.Value('i', TASKS) mutex = multiprocessing.Lock() for i in range(TASKS): p = multiprocessing.Process(target=value_func, args=(running, mutex)) p.start() while running.value > 0: time.sleep(0.08) mutex.acquire() print running.value, sys.stdout.flush() mutex.release() print print 'No more running processes' #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = multiprocessing.Queue() p = multiprocessing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print o, sys.stdout.flush() except Empty: print 'TIMEOUT' print #### TEST_CONDITION def condition_func(cond): cond.acquire() print '\t' + str(cond) time.sleep(2) print '\tchild is notifying' print '\t' + str(cond) cond.notify() cond.release() def test_condition(): cond = multiprocessing.Condition() p = multiprocessing.Process(target=condition_func, args=(cond,)) print cond cond.acquire() print cond cond.acquire() print cond p.start() print 'main is waiting' cond.wait() print 'main has woken up' print cond cond.release() print cond cond.release() p.join() print cond #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print running.value, 'tasks are running' mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print '%s has finished' % multiprocessing.current_process() mutex.release() sema.release() def test_semaphore(): sema = multiprocessing.Semaphore(3) mutex = multiprocessing.RLock() running = multiprocessing.Value('i', 0) processes = [ multiprocessing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print '\tchild sleeping' time.sleep(5.5) print '\n\tchild terminating' def test_join_timeout(): p = multiprocessing.Process(target=join_timeout_func) p.start() print 'waiting for process to finish' while 1: p.join(timeout=1) if not p.is_alive(): break print '.', sys.stdout.flush() #### TEST_EVENT def event_func(event): print '\t%r is waiting' % multiprocessing.current_process() event.wait() print '\t%r has woken up' % multiprocessing.current_process() def test_event(): event = multiprocessing.Event() processes = [multiprocessing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print 'main is sleeping' time.sleep(2) print 'main is setting event' event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert a == sa print 'Tests passed' def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [multiprocessing.Value(id, v) for id, v in values] shared_arrays = [multiprocessing.Array(id, a) for id, a in arrays] p = multiprocessing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=multiprocessing): global multiprocessing multiprocessing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print '\n\t######## %s\n' % func.__name__ func() ignore = multiprocessing.active_children() # cleanup any old processes if hasattr(multiprocessing, '_debug_info'): info = multiprocessing._debug_info() if info: print info raise ValueError, 'there should be no positive refcounts left' if __name__ == '__main__': multiprocessing.freeze_support() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print ' Using processes '.center(79, '-') namespace = multiprocessing elif sys.argv[1] == 'manager': print ' Using processes and a manager '.center(79, '-') namespace = multiprocessing.Manager() namespace.Process = multiprocessing.Process namespace.current_process = multiprocessing.current_process namespace.active_children = multiprocessing.active_children elif sys.argv[1] == 'threads': print ' Using threads '.center(79, '-') import multiprocessing.dummy as namespace else: print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0] raise SystemExit, 2 test(namespace) billiard-3.3.0.15/Doc/includes/mp_webserver.py0000644000076500000000000000401212270022117021511 0ustar asksolwheel00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `multiprocessing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # import os import sys from multiprocessing import Process, current_process, freeze_support from BaseHTTPServer import HTTPServer from SimpleHTTPServer import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocessing.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (current_process().name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print 'Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES) print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32'] os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freeze_support() test() billiard-3.3.0.15/Doc/includes/mp_workers.py0000644000076500000000000000401512270022117021204 0ustar asksolwheel00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # import time import random from multiprocessing import Process, Queue, current_process, freeze_support # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (current_process().name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks for task in TASKS1: task_queue.put(task) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print 'Unordered results:' for i in range(len(TASKS1)): print '\t', done_queue.get() # Add more tasks using `put()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print '\t', done_queue.get() # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freeze_support() test() billiard-3.3.0.15/Doc/index.rst0000644000076500000000000000074012270022117016476 0ustar asksolwheel00000000000000.. multiprocessing documentation master file, created by sphinx-quickstart on Wed Nov 26 12:47:00 2008. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to multiprocessing's documentation! =========================================== Contents: .. toctree:: library/multiprocessing.rst glossary.rst Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` billiard-3.3.0.15/Doc/library/0000755000076500000000000000000012276217622016315 5ustar asksolwheel00000000000000billiard-3.3.0.15/Doc/library/multiprocessing.rst0000644000076500000000000025074712270022117022300 0ustar asksolwheel00000000000000:mod:`multiprocessing` --- Process-based parallelism ==================================================== .. module:: multiprocessing :synopsis: Process-based parallelism. Introduction ------------ :mod:`multiprocessing` is a package that supports spawning processes using an API similar to the :mod:`threading` module. The :mod:`multiprocessing` package offers both local and remote concurrency, effectively side-stepping the :term:`Global Interpreter Lock` by using subprocesses instead of threads. Due to this, the :mod:`multiprocessing` module allows the programmer to fully leverage multiple processors on a given machine. It runs on both Unix and Windows. .. note:: Some of this package's functionality requires a functioning shared semaphore implementation on the host operating system. Without one, the :mod:`multiprocessing.synchronize` module will be disabled, and attempts to import it will result in an :exc:`ImportError`. See :issue:`3770` for additional information. .. note:: Functionality within this package requires that the ``__main__`` module be importable by the children. This is covered in :ref:`multiprocessing-programming` however it is worth pointing out here. This means that some examples, such as the :class:`multiprocessing.Pool` examples will not work in the interactive interpreter. For example:: >>> from multiprocessing import Pool >>> p = Pool(5) >>> def f(x): ... return x*x ... >>> p.map(f, [1,2,3]) Process PoolWorker-1: Process PoolWorker-2: Process PoolWorker-3: Traceback (most recent call last): Traceback (most recent call last): Traceback (most recent call last): AttributeError: 'module' object has no attribute 'f' AttributeError: 'module' object has no attribute 'f' AttributeError: 'module' object has no attribute 'f' (If you try this it will actually output three full tracebacks interleaved in a semi-random fashion, and then you may have to stop the master process somehow.) The :class:`Process` class ~~~~~~~~~~~~~~~~~~~~~~~~~~ In :mod:`multiprocessing`, processes are spawned by creating a :class:`Process` object and then calling its :meth:`~Process.start` method. :class:`Process` follows the API of :class:`threading.Thread`. A trivial example of a multiprocess program is :: from multiprocessing import Process def f(name): print('hello', name) if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() To show the individual process IDs involved, here is an expanded example:: from multiprocessing import Process import os def info(title): print(title) print('module name:', __name__) print('parent process:', os.getppid()) print('process id:', os.getpid()) def f(name): info('function f') print('hello', name) if __name__ == '__main__': info('main line') p = Process(target=f, args=('bob',)) p.start() p.join() For an explanation of why (on Windows) the ``if __name__ == '__main__'`` part is necessary, see :ref:`multiprocessing-programming`. Exchanging objects between processes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :mod:`multiprocessing` supports two types of communication channel between processes: **Queues** The :class:`Queue` class is a near clone of :class:`Queue.Queue`. For example:: from multiprocessing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print(q.get()) # prints "[42, None, 'hello']" p.join() Queues are thread and process safe, but note that they must never be instantiated as a side effect of importing a module: this can lead to a deadlock! (see :ref:`threaded-imports`) **Pipes** The :func:`Pipe` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example:: from multiprocessing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print(parent_conn.recv()) # prints "[42, None, 'hello']" p.join() The two connection objects returned by :func:`Pipe` represent the two ends of the pipe. Each connection object has :meth:`~Connection.send` and :meth:`~Connection.recv` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. Synchronization between processes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :mod:`multiprocessing` contains equivalents of all the synchronization primitives from :mod:`threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from multiprocessing import Process, Lock def f(l, i): l.acquire() print('hello world', i) l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then :mod:`multiprocessing` provides a couple of ways of doing so. **Shared memory** Data can be stored in a shared memory map using :class:`Value` or :class:`Array`. For example, the following code :: from multiprocessing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print(num.value) print(arr[:]) will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The ``'d'`` and ``'i'`` arguments used when creating ``num`` and ``arr`` are typecodes of the kind used by the :mod:`array` module: ``'d'`` indicates a double precision float and ``'i'`` indicates a signed integer. These shared objects will be process and thread-safe. For more flexibility in using shared memory one can use the :mod:`multiprocessing.sharedctypes` module which supports the creation of arbitrary ctypes objects allocated from shared memory. **Server process** A manager object returned by :func:`Manager` controls a server process which holds Python objects and allows other processes to manipulate them using proxies. A manager returned by :func:`Manager` will support types :class:`list`, :class:`dict`, :class:`Namespace`, :class:`Lock`, :class:`RLock`, :class:`Semaphore`, :class:`BoundedSemaphore`, :class:`Condition`, :class:`Event`, :class:`Queue`, :class:`Value` and :class:`Array`. For example, :: from multiprocessing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print(d) print(l) will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. Using a pool of workers ~~~~~~~~~~~~~~~~~~~~~~~ The :class:`~multiprocessing.pool.Pool` class represents a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from multiprocessing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.apply_async(f, [10]) # evaluate "f(10)" asynchronously print(result.get(timeout=1)) # prints "100" unless your computer is *very* slow print(pool.map(f, range(10))) # prints "[0, 1, 4,..., 81]" Reference --------- The :mod:`multiprocessing` package mostly replicates the API of the :mod:`threading` module. :class:`Process` and exceptions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. class:: Process([group[, target[, name[, args[, kwargs]]]]], daemon=None) Process objects represent activity that is run in a separate process. The :class:`Process` class has equivalents of all the methods of :class:`threading.Thread`. The constructor should always be called with keyword arguments. *group* should always be ``None``; it exists solely for compatibility with :class:`threading.Thread`. *target* is the callable object to be invoked by the :meth:`run()` method. It defaults to ``None``, meaning nothing is called. *name* is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. *args* is the argument tuple for the target invocation. *kwargs* is a dictionary of keyword arguments for the target invocation. If provided, the keyword-only *daemon* argument sets the process :attr:`daemon` flag to ``True`` or ``False``. If ``None`` (the default), this flag will be inherited from the creating process. By default, no arguments are passed to *target*. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (:meth:`Process.__init__`) before doing anything else to the process. .. versionchanged:: 3.3 Added the *daemon* argument. .. method:: run() Method representing the process's activity. You may override this method in a subclass. The standard :meth:`run` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the *args* and *kwargs* arguments, respectively. .. method:: start() Start the process's activity. This must be called at most once per process object. It arranges for the object's :meth:`run` method to be invoked in a separate process. .. method:: join([timeout]) If the optional argument *timeout* is ``None`` (the default), the method blocks until the process whose :meth:`join` method is called terminates. If *timeout* is a positive number, it blocks at most *timeout* seconds. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. .. attribute:: name The process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. .. method:: is_alive Return whether the process is alive. Roughly, a process object is alive from the moment the :meth:`start` method returns until the child process terminates. .. attribute:: daemon The process's daemon flag, a Boolean value. This must be set before :meth:`start` is called. The initial value is inherited from the creating process. When a process exits, it attempts to terminate all of its daemonic child processes. Note that a daemonic process is not allowed to create child processes. Otherwise a daemonic process would leave its children orphaned if it gets terminated when its parent process exits. Additionally, these are **not** Unix daemons or services, they are normal processes that will be terminated (and not joined) if non-daemonic processes have exited. In addition to the :class:`Threading.Thread` API, :class:`Process` objects also support the following attributes and methods: .. attribute:: pid Return the process ID. Before the process is spawned, this will be ``None``. .. attribute:: exitcode The child's exit code. This will be ``None`` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. .. attribute:: authkey The process's authentication key (a byte string). When :mod:`multiprocessing` is initialized the main process is assigned a random string using :func:`os.random`. When a :class:`Process` object is created, it will inherit the authentication key of its parent process, although this may be changed by setting :attr:`authkey` to another byte string. See :ref:`multiprocessing-auth-keys`. .. attribute:: sentinel A numeric handle of a system object which will become "ready" when the process ends. You can use this value if you want to wait on several events at once using :func:`multiprocessing.connection.wait`. Otherwise calling :meth:`join()` is simpler. On Windows, this is an OS handle usable with the ``WaitForSingleObject`` and ``WaitForMultipleObjects`` family of API calls. On Unix, this is a file descriptor usable with primitives from the :mod:`select` module. .. versionadded:: 3.3 .. method:: terminate() Terminate the process. On Unix this is done using the ``SIGTERM`` signal; on Windows :c:func:`TerminateProcess` is used. Note that exit handlers and finally clauses, etc., will not be executed. Note that descendant processes of the process will *not* be terminated -- they will simply become orphaned. .. warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other process. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the :meth:`start`, :meth:`join`, :meth:`is_alive`, :meth:`terminate` and :attr:`exit_code` methods should only be called by the process that created the process object. Example usage of some of the methods of :class:`Process`: .. doctest:: >>> import multiprocessing, time, signal >>> p = multiprocessing.Process(target=time.sleep, args=(1000,)) >>> print(p, p.is_alive()) False >>> p.start() >>> print(p, p.is_alive()) True >>> p.terminate() >>> time.sleep(0.1) >>> print(p, p.is_alive()) False >>> p.exitcode == -signal.SIGTERM True .. exception:: BufferTooShort Exception raised by :meth:`Connection.recv_bytes_into()` when the supplied buffer object is too small for the message read. If ``e`` is an instance of :exc:`BufferTooShort` then ``e.args[0]`` will give the message as a byte string. Pipes and Queues ~~~~~~~~~~~~~~~~ When using multiple processes, one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use :func:`Pipe` (for a connection between two processes) or a queue (which allows multiple producers and consumers). The :class:`Queue`, :class:`SimpleQueue` and :class:`JoinableQueue` types are multi-producer, multi-consumer FIFO queues modelled on the :class:`Queue.Queue` class in the standard library. They differ in that :class:`Queue` lacks the :meth:`~Queue.Queue.task_done` and :meth:`~Queue.Queue.join` methods introduced into Python 2.5's :class:`queue.Queue` class. If you use :class:`JoinableQueue` then you **must** call :meth:`JoinableQueue.task_done` for each task removed from the queue or else the semaphore used to count the number of unfinished tasks may eventually overflow, raising an exception. Note that one can also create a shared queue by using a manager object -- see :ref:`multiprocessing-managers`. .. note:: :mod:`multiprocessing` uses the usual :exc:`Queue.Empty` and :exc:`Queue.Full` exceptions to signal a timeout. They are not available in the :mod:`multiprocessing` namespace so you need to import them from :mod:`queue`. .. warning:: If a process is killed using :meth:`Process.terminate` or :func:`os.kill` while it is trying to use a :class:`Queue`, then the data in the queue is likely to become corrupted. This may cause any other process to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used :meth:`JoinableQueue.cancel_join_thread`), then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children. Note that a queue created using a manager does not have this issue. See :ref:`multiprocessing-programming`. For an example of the usage of queues for interprocess communication see :ref:`multiprocessing-examples`. .. function:: Pipe([duplex]) Returns a pair ``(conn1, conn2)`` of :class:`Connection` objects representing the ends of a pipe. If *duplex* is ``True`` (the default) then the pipe is bidirectional. If *duplex* is ``False`` then the pipe is unidirectional: ``conn1`` can only be used for receiving messages and ``conn2`` can only be used for sending messages. .. class:: Queue([maxsize]) Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. The usual :exc:`Queue.Empty` and :exc:`Queue.Full` exceptions from the standard library's :mod:`Queue` module are raised to signal timeouts. :class:`Queue` implements all the methods of :class:`Queue.Queue` except for :meth:`~Queue.Queue.task_done` and :meth:`~Queue.Queue.join`. .. method:: qsize() Return the approximate size of the queue. Because of multithreading/multiprocessing semantics, this number is not reliable. Note that this may raise :exc:`NotImplementedError` on Unix platforms like Mac OS X where ``sem_getvalue()`` is not implemented. .. method:: empty() Return ``True`` if the queue is empty, ``False`` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. .. method:: full() Return ``True`` if the queue is full, ``False`` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. .. method:: put(obj[, block[, timeout]]) Put obj into the queue. If the optional argument *block* is ``True`` (the default) and *timeout* is ``None`` (the default), block if necessary until a free slot is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :exc:`queue.Full` exception if no free slot was available within that time. Otherwise (*block* is ``False``), put an item on the queue if a free slot is immediately available, else raise the :exc:`queue.Full` exception (*timeout* is ignored in that case). .. method:: put_nowait(obj) Equivalent to ``put(obj, False)``. .. method:: get([block[, timeout]]) Remove and return an item from the queue. If optional args *block* is ``True`` (the default) and *timeout* is ``None`` (the default), block if necessary until an item is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :exc:`queue.Empty` exception if no item was available within that time. Otherwise (block is ``False``), return an item if one is immediately available, else raise the :exc:`queue.Empty` exception (*timeout* is ignored in that case). .. method:: get_nowait() get_no_wait() Equivalent to ``get(False)``. :class:`multiprocessing.Queue` has a few additional methods not found in :class:`queue.Queue`. These methods are usually unnecessary for most code: .. method:: close() Indicate that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. .. method:: join_thread() Join the background thread. This can only be used after :meth:`close` has been called. It blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call :meth:`cancel_join_thread` to make :meth:`join_thread` do nothing. .. method:: cancel_join_thread() Prevent :meth:`join_thread` from blocking. In particular, this prevents the background thread from being joined automatically when the process exits -- see :meth:`join_thread`. .. class:: SimpleQueue() It is a simplified :class:`Queue` type, very close to a locked :class:`Pipe`. .. method:: empty() Return ``True`` if the queue is empty, ``False`` otherwise. .. method:: get() Remove and return an item from the queue. .. method:: put(item) Put *item* into the queue. .. class:: JoinableQueue([maxsize]) :class:`JoinableQueue`, a :class:`Queue` subclass, is a queue which additionally has :meth:`task_done` and :meth:`join` methods. .. method:: task_done() Indicate that a formerly enqueued task is complete. Used by queue consumer threads. For each :meth:`~Queue.get` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue that the processing on the task is complete. If a :meth:`~Queue.join` is currently blocking, it will resume when all items have been processed (meaning that a :meth:`task_done` call was received for every item that had been :meth:`~Queue.put` into the queue). Raises a :exc:`ValueError` if called more times than there were items placed in the queue. .. method:: join() Block until all items in the queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls :meth:`task_done` to indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, :meth:`~Queue.join` unblocks. Miscellaneous ~~~~~~~~~~~~~ .. function:: active_children() Return list of all live children of the current process. Calling this has the side affect of "joining" any processes which have already finished. .. function:: cpu_count() Return the number of CPUs in the system. May raise :exc:`NotImplementedError`. .. function:: current_process() Return the :class:`Process` object corresponding to the current process. An analogue of :func:`threading.current_thread`. .. function:: freeze_support() Add support for when a program which uses :mod:`multiprocessing` has been frozen to produce a Windows executable. (Has been tested with **py2exe**, **PyInstaller** and **cx_Freeze**.) One needs to call this function straight after the ``if __name__ == '__main__'`` line of the main module. For example:: from multiprocessing import Process, freeze_support def f(): print('hello world!') if __name__ == '__main__': freeze_support() Process(target=f).start() If the ``freeze_support()`` line is omitted then trying to run the frozen executable will raise :exc:`RuntimeError`. If the module is being run normally by the Python interpreter then :func:`freeze_support` has no effect. .. function:: set_executable() Sets the path of the Python interpreter to use when starting a child process. (By default :data:`sys.executable` is used). Embedders will probably need to do some thing like :: set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe')) before they can create child processes. (Windows only) .. note:: :mod:`multiprocessing` contains no analogues of :func:`threading.active_count`, :func:`threading.enumerate`, :func:`threading.settrace`, :func:`threading.setprofile`, :class:`threading.Timer`, or :class:`threading.local`. Connection Objects ~~~~~~~~~~~~~~~~~~ Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets. Connection objects are usually created using :func:`Pipe` -- see also :ref:`multiprocessing-listeners-clients`. .. class:: Connection .. method:: send(obj) Send an object to the other end of the connection which should be read using :meth:`recv`. The object must be picklable. Very large pickles (approximately 32 MB+, though it depends on the OS) may raise a ValueError exception. .. method:: recv() Return an object sent from the other end of the connection using :meth:`send`. Blocks until there its something to receive. Raises :exc:`EOFError` if there is nothing left to receive and the other end was closed. .. method:: fileno() Return the file descriptor or handle used by the connection. .. method:: close() Close the connection. This is called automatically when the connection is garbage collected. .. method:: poll([timeout]) Return whether there is any data available to be read. If *timeout* is not specified then it will return immediately. If *timeout* is a number then this specifies the maximum time in seconds to block. If *timeout* is ``None`` then an infinite timeout is used. Note that multiple connection objects may be polled at once by using :func:`multiprocessing.connection.wait`. .. method:: send_bytes(buffer[, offset[, size]]) Send byte data from an object supporting the buffer interface as a complete message. If *offset* is given then data is read from that position in *buffer*. If *size* is given then that many bytes will be read from buffer. Very large buffers (approximately 32 MB+, though it depends on the OS) may raise a :exc:`ValueError` exception .. method:: recv_bytes([maxlength]) Return a complete message of byte data sent from the other end of the connection as a string. Blocks until there is something to receive. Raises :exc:`EOFError` if there is nothing left to receive and the other end has closed. If *maxlength* is specified and the message is longer than *maxlength* then :exc:`OSError` is raised and the connection will no longer be readable. .. versionchanged:: 3.3 This function used to raise a :exc:`IOError`, which is now an alias of :exc:`OSError`. .. method:: recv_bytes_into(buffer[, offset]) Read into *buffer* a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Blocks until there is something to receive. Raises :exc:`EOFError` if there is nothing left to receive and the other end was closed. *buffer* must be an object satisfying the writable buffer interface. If *offset* is given then the message will be written into the buffer from that position. Offset must be a non-negative integer less than the length of *buffer* (in bytes). If the buffer is too short then a :exc:`BufferTooShort` exception is raised and the complete message is available as ``e.args[0]`` where ``e`` is the exception instance. For example: .. doctest:: >>> from multiprocessing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.send_bytes(b'thank you') >>> a.recv_bytes() b'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.send_bytes(arr1) >>> count = b.recv_bytes_into(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The :meth:`Connection.recv` method automatically unpickles the data it receives, which can be a security risk unless you can trust the process which sent the message. Therefore, unless the connection object was produced using :func:`Pipe` you should only use the :meth:`~Connection.recv` and :meth:`~Connection.send` methods after performing some sort of authentication. See :ref:`multiprocessing-auth-keys`. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted, because it may become impossible to be sure where the message boundaries lie. Synchronization primitives ~~~~~~~~~~~~~~~~~~~~~~~~~~ Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for :mod:`threading` module. Note that one can also create synchronization primitives by using a manager object -- see :ref:`multiprocessing-managers`. .. class:: BoundedSemaphore([value]) A bounded semaphore object: a clone of :class:`threading.BoundedSemaphore`. (On Mac OS X, this is indistinguishable from :class:`Semaphore` because ``sem_getvalue()`` is not implemented on that platform). .. class:: Condition([lock]) A condition variable: a clone of :class:`threading.Condition`. If *lock* is specified then it should be a :class:`Lock` or :class:`RLock` object from :mod:`multiprocessing`. .. versionchanged:: 3.3 The :meth:`wait_for` method was added. .. class:: Event() A clone of :class:`threading.Event`. This method returns the state of the internal semaphore on exit, so it will always return ``True`` except if a timeout is given and the operation times out. .. versionchanged:: 3.1 Previously, the method always returned ``None``. .. class:: Lock() A non-recursive lock object: a clone of :class:`threading.Lock`. .. class:: RLock() A recursive lock object: a clone of :class:`threading.RLock`. .. class:: Semaphore([value]) A semaphore object: a clone of :class:`threading.Semaphore`. .. note:: On Mac OS X, ``sem_timedwait`` is unsupported, so calling ``acquire()`` with a timeout will emulate that function's behavior using a sleeping loop. .. note:: If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to :meth:`BoundedSemaphore.acquire`, :meth:`Lock.acquire`, :meth:`RLock.acquire`, :meth:`Semaphore.acquire`, :meth:`Condition.acquire` or :meth:`Condition.wait` then the call will be immediately interrupted and :exc:`KeyboardInterrupt` will be raised. This differs from the behaviour of :mod:`threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared :mod:`ctypes` Objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is possible to create shared objects using shared memory which can be inherited by child processes. .. function:: Value(typecode_or_type, *args[, lock]) Return a :mod:`ctypes` object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. *typecode_or_type* determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the :mod:`array` module. *\*args* is passed on to the constructor for the type. If *lock* is ``True`` (the default) then a new lock object is created to synchronize access to the value. If *lock* is a :class:`Lock` or :class:`RLock` object then that will be used to synchronize access to the value. If *lock* is ``False`` then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that *lock* is a keyword-only argument. .. function:: Array(typecode_or_type, size_or_initializer, *, lock=True) Return a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. *typecode_or_type* determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the :mod:`array` module. If *size_or_initializer* is an integer, then it determines the length of the array, and the array will be initially zeroed. Otherwise, *size_or_initializer* is a sequence which is used to initialize the array and whose length determines the length of the array. If *lock* is ``True`` (the default) then a new lock object is created to synchronize access to the value. If *lock* is a :class:`Lock` or :class:`RLock` object then that will be used to synchronize access to the value. If *lock* is ``False`` then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that *lock* is a keyword only argument. Note that an array of :data:`ctypes.c_char` has *value* and *raw* attributes which allow one to use it to store and retrieve strings. The :mod:`multiprocessing.sharedctypes` module >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> .. module:: multiprocessing.sharedctypes :synopsis: Allocate ctypes objects from shared memory. The :mod:`multiprocessing.sharedctypes` module provides functions for allocating :mod:`ctypes` objects from shared memory which can be inherited by child processes. .. note:: Although it is possible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. function:: RawArray(typecode_or_type, size_or_initializer) Return a ctypes array allocated from shared memory. *typecode_or_type* determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the :mod:`array` module. If *size_or_initializer* is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise *size_or_initializer* is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use :func:`Array` instead to make sure that access is automatically synchronized using a lock. .. function:: RawValue(typecode_or_type, *args) Return a ctypes object allocated from shared memory. *typecode_or_type* determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the :mod:`array` module. *\*args* is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use :func:`Value` instead to make sure that access is automatically synchronized using a lock. Note that an array of :data:`ctypes.c_char` has ``value`` and ``raw`` attributes which allow one to use it to store and retrieve strings -- see documentation for :mod:`ctypes`. .. function:: Array(typecode_or_type, size_or_initializer, *args[, lock]) The same as :func:`RawArray` except that depending on the value of *lock* a process-safe synchronization wrapper may be returned instead of a raw ctypes array. If *lock* is ``True`` (the default) then a new lock object is created to synchronize access to the value. If *lock* is a :class:`Lock` or :class:`RLock` object then that will be used to synchronize access to the value. If *lock* is ``False`` then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that *lock* is a keyword-only argument. .. function:: Value(typecode_or_type, *args[, lock]) The same as :func:`RawValue` except that depending on the value of *lock* a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If *lock* is ``True`` (the default) then a new lock object is created to synchronize access to the value. If *lock* is a :class:`Lock` or :class:`RLock` object then that will be used to synchronize access to the value. If *lock* is ``False`` then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that *lock* is a keyword-only argument. .. function:: copy(obj) Return a ctypes object allocated from shared memory which is a copy of the ctypes object *obj*. .. function:: synchronized(obj[, lock]) Return a process-safe wrapper object for a ctypes object which uses *lock* to synchronize access. If *lock* is ``None`` (the default) then a :class:`multiprocessing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: :meth:`get_obj` returns the wrapped object and :meth:`get_lock` returns the lock object used for synchronization. Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table ``MyStruct`` is some subclass of :class:`ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Below is an example where a number of ctypes objects are modified by a child process:: from multiprocessing import Process, Lock from multiprocessing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for a in A: a.x **= 2 a.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print(n.value) print(x.value) print(s.value) print([(a.x, a.y) for a in A]) .. highlight:: none The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. highlight:: python .. _multiprocessing-managers: Managers ~~~~~~~~ Managers provide a way to create data which can be shared between different processes. A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. .. function:: multiprocessing.Manager() Returns a started :class:`~multiprocessing.managers.SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. .. module:: multiprocessing.managers :synopsis: Share data between process with shared objects. Manager processes will be shutdown as soon as they are garbage collected or their parent process exits. The manager classes are defined in the :mod:`multiprocessing.managers` module: .. class:: BaseManager([address[, authkey]]) Create a BaseManager object. Once created one should call :meth:`start` or ``get_server().serve_forever()`` to ensure that the manager object refers to a started manager process. *address* is the address on which the manager process listens for new connections. If *address* is ``None`` then an arbitrary one is chosen. *authkey* is the authentication key which will be used to check the validity of incoming connections to the server process. If *authkey* is ``None`` then ``current_process().authkey``. Otherwise *authkey* is used and it must be a string. .. method:: start([initializer[, initargs]]) Start a subprocess to start the manager. If *initializer* is not ``None`` then the subprocess will call ``initializer(*initargs)`` when it starts. .. method:: get_server() Returns a :class:`Server` object which represents the actual server under the control of the Manager. The :class:`Server` object supports the :meth:`serve_forever` method:: >>> from multiprocessing.managers import BaseManager >>> manager = BaseManager(address=('', 50000), authkey='abc') >>> server = manager.get_server() >>> server.serve_forever() :class:`Server` additionally has an :attr:`address` attribute. .. method:: connect() Connect a local manager object to a remote manager process:: >>> from multiprocessing.managers import BaseManager >>> m = BaseManager(address=('127.0.0.1', 5000), authkey='abc') >>> m.connect() .. method:: shutdown() Stop the process used by the manager. This is only available if :meth:`start` has been used to start the server process. This can be called multiple times. .. method:: register(typeid[, callable[, proxytype[, exposed[, method_to_typeid[, create_method]]]]]) A classmethod which can be used for registering a type or callable with the manager class. *typeid* is a "type identifier" which is used to identify a particular type of shared object. This must be a string. *callable* is a callable used for creating objects for this type identifier. If a manager instance will be created using the :meth:`from_address` classmethod or if the *create_method* argument is ``False`` then this can be left as ``None``. *proxytype* is a subclass of :class:`BaseProxy` which is used to create proxies for shared objects with this *typeid*. If ``None`` then a proxy class is created automatically. *exposed* is used to specify a sequence of method names which proxies for this typeid should be allowed to access using :meth:`BaseProxy._callMethod`. (If *exposed* is ``None`` then :attr:`proxytype._exposed_` is used instead if it exists.) In the case where no exposed list is specified, all "public methods" of the shared object will be accessible. (Here a "public method" means any attribute which has a :meth:`__call__` method and whose name does not begin with ``'_'``.) *method_to_typeid* is a mapping used to specify the return type of those exposed methods which should return a proxy. It maps method names to typeid strings. (If *method_to_typeid* is ``None`` then :attr:`proxytype._method_to_typeid_` is used instead if it exists.) If a method's name is not a key of this mapping or if the mapping is ``None`` then the object returned by the method will be copied by value. *create_method* determines whether a method should be created with name *typeid* which can be used to tell the server process to create a new shared object and return a proxy for it. By default it is ``True``. :class:`BaseManager` instances also have one read-only property: .. attribute:: address The address used by the manager. .. class:: SyncManager A subclass of :class:`BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by :func:`multiprocessing.Manager`. It also supports creation of shared lists and dictionaries. .. method:: BoundedSemaphore([value]) Create a shared :class:`threading.BoundedSemaphore` object and return a proxy for it. .. method:: Condition([lock]) Create a shared :class:`threading.Condition` object and return a proxy for it. If *lock* is supplied then it should be a proxy for a :class:`threading.Lock` or :class:`threading.RLock` object. .. versionchanged:: 3.3 The :meth:`wait_for` method was added. .. method:: Event() Create a shared :class:`threading.Event` object and return a proxy for it. .. method:: Lock() Create a shared :class:`threading.Lock` object and return a proxy for it. .. method:: Namespace() Create a shared :class:`Namespace` object and return a proxy for it. .. method:: Queue([maxsize]) Create a shared :class:`Queue.Queue` object and return a proxy for it. .. method:: RLock() Create a shared :class:`threading.RLock` object and return a proxy for it. .. method:: Semaphore([value]) Create a shared :class:`threading.Semaphore` object and return a proxy for it. .. method:: Array(typecode, sequence) Create an array and return a proxy for it. .. method:: Value(typecode, value) Create an object with a writable ``value`` attribute and return a proxy for it. .. method:: dict() dict(mapping) dict(sequence) Create a shared ``dict`` object and return a proxy for it. .. method:: list() list(sequence) Create a shared ``list`` object and return a proxy for it. .. note:: Modifications to mutable values or items in dict and list proxies will not be propagated through the manager, because the proxy has no way of knowing when its values or items are modified. To modify such an item, you can re-assign the modified object to the container proxy:: # create a list proxy and append a mutable object (a dictionary) lproxy = manager.list() lproxy.append({}) # now mutate the dictionary d = lproxy[0] d['a'] = 1 d['b'] = 2 # at this point, the changes to d are not yet synced, but by # reassigning the dictionary, the proxy is notified of the change lproxy[0] = d Namespace objects >>>>>>>>>>>>>>>>> A namespace object has no public methods, but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with ``'_'`` will be an attribute of the proxy and not an attribute of the referent: .. doctest:: >>> manager = multiprocessing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print(Global) Namespace(x=10, y='hello') Customized managers >>>>>>>>>>>>>>>>>>> To create one's own manager, one creates a subclass of :class:`BaseManager` and uses the :meth:`~BaseManager.register` classmethod to register new types or callables with the manager class. For example:: from multiprocessing.managers import BaseManager class MathsClass: def add(self, x, y): return x + y def mul(self, x, y): return x * y class MyManager(BaseManager): pass MyManager.register('Maths', MathsClass) if __name__ == '__main__': manager = MyManager() manager.start() maths = manager.Maths() print(maths.add(4, 3)) # prints 7 print(maths.mul(7, 8)) # prints 56 Using a remote manager >>>>>>>>>>>>>>>>>>>>>> It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a single shared queue which remote clients can access:: >>> from multiprocessing.managers import BaseManager >>> import queue >>> queue = Queue.Queue() >>> class QueueManager(BaseManager): pass >>> QueueManager.register('get_queue', callable=lambda:queue) >>> m = QueueManager(address=('', 50000), authkey='abracadabra') >>> s = m.get_server() >>> s.serve_forever() One client can access the server as follows:: >>> from multiprocessing.managers import BaseManager >>> class QueueManager(BaseManager): pass >>> QueueManager.register('get_queue') >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='abracadabra') >>> m.connect() >>> queue = m.get_queue() >>> Queue.put('hello') Another client can also use it:: >>> from multiprocessing.managers import BaseManager >>> class QueueManager(BaseManager): pass >>> QueueManager.register('get_queue') >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='abracadabra') >>> m.connect() >>> queue = m.get_queue() >>> Queue.get() 'hello' Local processes can also access that queue, using the code from above on the client to access it remotely:: >>> from multiprocessing import Process, Queue >>> from multiprocessing.managers import BaseManager >>> class Worker(Process): ... def __init__(self, q): ... self.q = q ... super(Worker, self).__init__() ... def run(self): ... self.q.put('local hello') ... >>> queue = Queue() >>> w = Worker(queue) >>> w.start() >>> class QueueManager(BaseManager): pass ... >>> QueueManager.register('get_queue', callable=lambda: queue) >>> m = QueueManager(address=('', 50000), authkey='abracadabra') >>> s = m.get_server() >>> s.serve_forever() Proxy Objects ~~~~~~~~~~~~~ A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can: .. doctest:: >>> from multiprocessing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print(l) [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print(repr(l)) >>> l[4] 16 >>> l[2:5] [4, 9, 16] Notice that applying :func:`str` to a proxy will return the representation of the referent, whereas applying :func:`repr` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second: .. doctest:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of a now contains referent of b >>> print(a, b) [[]] [] >>> b.append('hello') >>> print(a, b) [['hello']] ['hello'] .. note:: The proxy types in :mod:`multiprocessing` do nothing to support comparisons by value. So, for instance, we have: .. doctest:: >>> manager.list([1,2,3]) == [1,2,3] False One should just use a copy of the referent instead when making comparisons. .. class:: BaseProxy Proxy objects are instances of subclasses of :class:`BaseProxy`. .. method:: _callmethod(methodname[, args[, kwds]]) Call and return the result of a method of the proxy's referent. If ``proxy`` is a proxy whose referent is ``obj`` then the expression :: proxy._callmethod(methodname, args, kwds) will evaluate the expression :: getattr(obj, methodname)(*args, **kwds) in the manager's process. The returned value will be a copy of the result of the call or a proxy to a new shared object -- see documentation for the *method_to_typeid* argument of :meth:`BaseManager.register`. If an exception is raised by the call, then is re-raised by :meth:`_callmethod`. If some other exception is raised in the manager's process then this is converted into a :exc:`RemoteError` exception and is raised by :meth:`_callmethod`. Note in particular that an exception will be raised if *methodname* has not been *exposed* An example of the usage of :meth:`_callmethod`: .. doctest:: >>> l = manager.list(range(10)) >>> l._callmethod('__len__') 10 >>> l._callmethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callmethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range .. method:: _getvalue() Return a copy of the referent. If the referent is unpicklable then this will raise an exception. .. method:: __repr__ Return a representation of the proxy object. .. method:: __str__ Return the representation of the referent. Cleanup >>>>>>> A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Process Pools ~~~~~~~~~~~~~ .. module:: multiprocessing.pool :synopsis: Create pools of processes. One can create a pool of processes which will carry out tasks submitted to it with the :class:`Pool` class. .. class:: multiprocessing.Pool([processes[, initializer[, initargs[, maxtasksperchild]]]]) A process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. *processes* is the number of worker processes to use. If *processes* is ``None`` then the number returned by :func:`cpu_count` is used. If *initializer* is not ``None`` then each worker process will call ``initializer(*initargs)`` when it starts. .. versionadded:: 3.2 *maxtasksperchild* is the number of tasks a worker process can complete before it will exit and be replaced with a fresh worker process, to enable unused resources to be freed. The default *maxtasksperchild* is None, which means worker processes will live as long as the pool. .. note:: Worker processes within a :class:`Pool` typically live for the complete duration of the Pool's work queue. A frequent pattern found in other systems (such as Apache, mod_wsgi, etc) to free resources held by workers is to allow a worker within a pool to complete only a set amount of work before being exiting, being cleaned up and a new process spawned to replace the old one. The *maxtasksperchild* argument to the :class:`Pool` exposes this ability to the end user. .. method:: apply(func[, args[, kwds]]) Call *func* with arguments *args* and keyword arguments *kwds*. It blocks until the result is ready. Given this blocks, :meth:`apply_async` is better suited for performing work in parallel. Additionally, *func* is only executed in one of the workers of the pool. .. method:: apply_async(func[, args[, kwds[, callback[, error_callback]]]]) A variant of the :meth:`apply` method which returns a result object. If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* is applied instead If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then the *error_callback* is called with the exception instance. Callbacks should complete immediately since otherwise the thread which handles the results will get blocked. .. method:: map(func, iterable[, chunksize]) A parallel equivalent of the :func:`map` built-in function (it supports only one *iterable* argument though). It blocks until the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting *chunksize* to a positive integer. .. method:: map_async(func, iterable[, chunksize[, callback[, error_callback]]]) A variant of the :meth:`.map` method which returns a result object. If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* is applied instead If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then the *error_callback* is called with the exception instance. Callbacks should complete immediately since otherwise the thread which handles the results will get blocked. .. method:: imap(func, iterable[, chunksize]) A lazier version of :meth:`map`. The *chunksize* argument is the same as the one used by the :meth:`.map` method. For very long iterables using a large value for *chunksize* can make the job complete **much** faster than using the default value of ``1``. Also if *chunksize* is ``1`` then the :meth:`!next` method of the iterator returned by the :meth:`imap` method has an optional *timeout* parameter: ``next(timeout)`` will raise :exc:`multiprocessing.TimeoutError` if the result cannot be returned within *timeout* seconds. .. method:: imap_unordered(func, iterable[, chunksize]) The same as :meth:`imap` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) .. method:: starmap(func, iterable[, chunksize]) Like :meth:`map` except that the elements of the `iterable` are expected to be iterables that are unpacked as arguments. Hence an `iterable` of `[(1,2), (3, 4)]` results in `[func(1,2), func(3,4)]`. .. versionadded:: 3.3 .. method:: starmap_async(func, iterable[, chunksize[, callback[, error_back]]]) A combination of :meth:`starmap` and :meth:`map_async` that iterates over `iterable` of iterables and calls `func` with the iterables unpacked. Returns a result object. .. versionadded:: 3.3 .. method:: close() Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. .. method:: terminate() Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected :meth:`terminate` will be called immediately. .. method:: join() Wait for the worker processes to exit. One must call :meth:`close` or :meth:`terminate` before using :meth:`join`. .. class:: AsyncResult The class of the result returned by :meth:`Pool.apply_async` and :meth:`Pool.map_async`. .. method:: get([timeout]) Return the result when it arrives. If *timeout* is not ``None`` and the result does not arrive within *timeout* seconds then :exc:`multiprocessing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by :meth:`get`. .. method:: wait([timeout]) Wait until the result is available or until *timeout* seconds pass. .. method:: ready() Return whether the call has completed. .. method:: successful() Return whether the call completed without raising an exception. Will raise :exc:`AssertionError` if the result is not ready. The following example demonstrates the use of a pool:: from multiprocessing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.apply_async(f, (10,)) # evaluate "f(10)" asynchronously print(result.get(timeout=1)) # prints "100" unless your computer is *very* slow print(pool.map(f, range(10))) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print(next(it)) # prints "0" print(next(it)) # prints "1" print(it.next(timeout=1)) # prints "4" unless your computer is *very* slow import time result = pool.apply_async(time.sleep, (10,)) print(result.get(timeout=1)) # raises TimeoutError .. _multiprocessing-listeners-clients: Listeners and Clients ~~~~~~~~~~~~~~~~~~~~~ .. module:: multiprocessing.connection :synopsis: API for dealing with sockets. Usually message passing between processes is done using queues or by using :class:`Connection` objects returned by :func:`Pipe`. However, the :mod:`multiprocessing.connection` module allows some extra flexibility. It basically gives a high level message oriented API for dealing with sockets or Windows named pipes. It also has support for *digest authentication* using the :mod:`hmac` module, and for polling multiple connections at the same time. .. function:: deliver_challenge(connection, authkey) Send a randomly generated message to the other end of the connection and wait for a reply. If the reply matches the digest of the message using *authkey* as the key then a welcome message is sent to the other end of the connection. Otherwise :exc:`AuthenticationError` is raised. .. function:: answerChallenge(connection, authkey) Receive a message, calculate the digest of the message using *authkey* as the key, and then send the digest back. If a welcome message is not received, then :exc:`AuthenticationError` is raised. .. function:: Client(address[, family[, authenticate[, authkey]]]) Attempt to set up a connection to the listener which is using address *address*, returning a :class:`~multiprocessing.Connection`. The type of the connection is determined by *family* argument, but this can generally be omitted since it can usually be inferred from the format of *address*. (See :ref:`multiprocessing-address-formats`) If *authenticate* is ``True`` or *authkey* is a string then digest authentication is used. The key used for authentication will be either *authkey* or ``current_process().authkey)`` if *authkey* is ``None``. If authentication fails then :exc:`AuthenticationError` is raised. See :ref:`multiprocessing-auth-keys`. .. class:: Listener([address[, family[, backlog[, authenticate[, authkey]]]]]) A wrapper for a bound socket or Windows named pipe which is 'listening' for connections. *address* is the address to be used by the bound socket or named pipe of the listener object. .. note:: If an address of '0.0.0.0' is used, the address will not be a connectable end point on Windows. If you require a connectable end-point, you should use '127.0.0.1'. *family* is the type of socket (or named pipe) to use. This can be one of the strings ``'AF_INET'`` (for a TCP socket), ``'AF_UNIX'`` (for a Unix domain socket) or ``'AF_PIPE'`` (for a Windows named pipe). Of these only the first is guaranteed to be available. If *family* is ``None`` then the family is inferred from the format of *address*. If *address* is also ``None`` then a default is chosen. This default is the family which is assumed to be the fastest available. See :ref:`multiprocessing-address-formats`. Note that if *family* is ``'AF_UNIX'`` and address is ``None`` then the socket will be created in a private temporary directory created using :func:`tempfile.mkstemp`. If the listener object uses a socket then *backlog* (1 by default) is passed to the :meth:`listen` method of the socket once it has been bound. If *authenticate* is ``True`` (``False`` by default) or *authkey* is not ``None`` then digest authentication is used. If *authkey* is a string then it will be used as the authentication key; otherwise it must be *None*. If *authkey* is ``None`` and *authenticate* is ``True`` then ``current_process().authkey`` is used as the authentication key. If *authkey* is ``None`` and *authenticate* is ``False`` then no authentication is done. If authentication fails then :exc:`AuthenticationError` is raised. See :ref:`multiprocessing-auth-keys`. .. method:: accept() Accept a connection on the bound socket or named pipe of the listener object and return a :class:`Connection` object. If authentication is attempted and fails, then :exc:`AuthenticationError` is raised. .. method:: close() Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly. Listener objects have the following read-only properties: .. attribute:: address The address which is being used by the Listener object. .. attribute:: last_accepted The address from which the last accepted connection came. If this is unavailable then it is ``None``. .. function:: wait(object_list, timeout=None) Wait till an object in *object_list* is ready. Returns the list of those objects in *object_list* which are ready. If *timeout* is a float then the call blocks for at most that many seconds. If *timeout* is ``None`` then it will block for an unlimited period. For both Unix and Windows, an object can appear in *object_list* if it is * a readable :class:`~multiprocessing.Connection` object; * a connected and readable :class:`socket.socket` object; or * the :attr:`~multiprocessing.Process.sentinel` attribute of a :class:`~multiprocessing.Process` object. A connection or socket object is ready when there is data available to be read from it, or the other end has been closed. **Unix**: ``wait(object_list, timeout)`` almost equivalent ``select.select(object_list, [], [], timeout)``. The difference is that, if :func:`select.select` is interrupted by a signal, it can raise :exc:`OSError` with an error number of ``EINTR``, whereas :func:`wait` will not. **Windows**: An item in *object_list* must either be an integer handle which is waitable (according to the definition used by the documentation of the Win32 function ``WaitForMultipleObjects()``) or it can be an object with a :meth:`fileno` method which returns a socket handle or pipe handle. (Note that pipe handles and socket handles are **not** waitable handles.) .. versionadded:: 3.3 The module defines two exceptions: .. exception:: AuthenticationError Exception raised when there is an authentication error. **Examples** The following server code creates a listener which uses ``'secret password'`` as an authentication key. It then waits for a connection and sends some data to the client:: from multiprocessing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey=b'secret password') conn = listener.accept() print('connection accepted from', listener.last_accepted) conn.send([2.25, None, 'junk', float]) conn.send_bytes(b'hello') conn.send_bytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from multiprocessing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey=b'secret password') print(conn.recv()) # => [2.25, None, 'junk', float] print(conn.recv_bytes()) # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print(conn.recv_bytes_into(arr)) # => 8 print(arr) # => array('i', [42, 1729, 0, 0, 0]) conn.close() The following code uses :func:`~multiprocessing.connection.wait` to wait for messages from multiple processes at once:: import time, random from multiprocessing import Process, Pipe, current_process from multiprocessing.connection import wait def foo(w): for i in range(10): w.send((i, current_process().name)) w.close() if __name__ == '__main__': readers = [] for i in range(4): r, w = Pipe(duplex=False) readers.append(r) p = Process(target=foo, args=(w,)) p.start() # We close the writable end of the pipe now to be sure that # p is the only process which owns a handle for it. This # ensures that when p closes its handle for the writable end, # wait() will promptly report the readable end as being ready. w.close() while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) else: print(msg) .. _multiprocessing-address-formats: Address Formats >>>>>>>>>>>>>>> * An ``'AF_INET'`` address is a tuple of the form ``(hostname, port)`` where *hostname* is a string and *port* is an integer. * An ``'AF_UNIX'`` address is a string representing a filename on the filesystem. * An ``'AF_PIPE'`` address is a string of the form :samp:`r'\\\\.\\pipe\\{PipeName}'`. To use :func:`Client` to connect to a named pipe on a remote computer called *ServerName* one should use an address of the form :samp:`r'\\\\{ServerName}\\pipe\\{PipeName}'` instead. Note that any string beginning with two backslashes is assumed by default to be an ``'AF_PIPE'`` address rather than an ``'AF_UNIX'`` address. .. _multiprocessing-auth-keys: Authentication keys ~~~~~~~~~~~~~~~~~~~ When one uses :meth:`Connection.recv`, the data received is automatically unpickled. Unfortunately unpickling data from an untrusted source is a security risk. Therefore :class:`Listener` and :func:`Client` use the :mod:`hmac` module to provide digest authentication. An authentication key is a string which can be thought of as a password: once a connection is established both ends will demand proof that the other knows the authentication key. (Demonstrating that both ends are using the same key does **not** involve sending the key over the connection.) If authentication is requested but do authentication key is specified then the return value of ``current_process().authkey`` is used (see :class:`~multiprocessing.Process`). This value will automatically inherited by any :class:`~multiprocessing.Process` object that the current process creates. This means that (by default) all processes of a multi-process program will share a single authentication key which can be used when setting up connections between themselves. Suitable authentication keys can also be generated by using :func:`os.urandom`. Logging ~~~~~~~ Some support for logging is available. Note, however, that the :mod:`logging` package does not use process shared locks so it is possible (depending on the handler type) for messages from different processes to get mixed up. .. currentmodule:: multiprocessing .. function:: get_logger() Returns the logger used by :mod:`multiprocessing`. If necessary, a new one will be created. When first created the logger has level :data:`logging.NOTSET` and no default handler. Messages sent to this logger will not by default propagate to the root logger. Note that on Windows child processes will only inherit the level of the parent process's logger -- any other customization of the logger will not be inherited. .. currentmodule:: multiprocessing .. function:: log_to_stderr() This function performs a call to :func:`get_logger` but in addition to returning the logger created by get_logger, it adds a handler which sends output to :data:`sys.stderr` using format ``'[%(levelname)s/%(processName)s] %(message)s'``. Below is an example session with logging turned on:: >>> import multiprocessing, logging >>> logger = multiprocessing.log_to_stderr() >>> logger.setLevel(logging.INFO) >>> logger.warning('doomed') [WARNING/MainProcess] doomed >>> m = multiprocessing.Manager() [INFO/SyncManager-...] child process calling self.run() [INFO/SyncManager-...] created temp directory /.../pymp-... [INFO/SyncManager-...] manager serving at '/.../listener-...' >>> del m [INFO/MainProcess] sending shutdown message to manager [INFO/SyncManager-...] manager exiting with exitcode 0 In addition to having these two logging functions, the multiprocessing also exposes two additional logging level attributes. These are :const:`SUBWARNING` and :const:`SUBDEBUG`. The table below illustrates where theses fit in the normal level hierarchy. +----------------+----------------+ | Level | Numeric value | +================+================+ | ``SUBWARNING`` | 25 | +----------------+----------------+ | ``SUBDEBUG`` | 5 | +----------------+----------------+ For a full table of logging levels, see the :mod:`logging` module. These additional logging levels are used primarily for certain debug messages within the multiprocessing module. Below is the same example as above, except with :const:`SUBDEBUG` enabled:: >>> import multiprocessing, logging >>> logger = multiprocessing.log_to_stderr() >>> logger.setLevel(multiprocessing.SUBDEBUG) >>> logger.warning('doomed') [WARNING/MainProcess] doomed >>> m = multiprocessing.Manager() [INFO/SyncManager-...] child process calling self.run() [INFO/SyncManager-...] created temp directory /.../pymp-... [INFO/SyncManager-...] manager serving at '/.../pymp-djGBXN/listener-...' >>> del m [SUBDEBUG/MainProcess] finalizer calling ... [INFO/MainProcess] sending shutdown message to manager [DEBUG/SyncManager-...] manager received shutdown message [SUBDEBUG/SyncManager-...] calling ... [SUBDEBUG/SyncManager-...] calling [SUBDEBUG/SyncManager-...] finalizer calling ... [INFO/SyncManager-...] manager exiting with exitcode 0 The :mod:`multiprocessing.dummy` module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. module:: multiprocessing.dummy :synopsis: Dumb wrapper around threading. :mod:`multiprocessing.dummy` replicates the API of :mod:`multiprocessing` but is no more than a wrapper around the :mod:`threading` module. .. _multiprocessing-programming: Programming guidelines ---------------------- There are certain guidelines and idioms which should be adhered to when using :mod:`multiprocessing`. All platforms ~~~~~~~~~~~~~ Avoid shared state As far as possible one should try to avoid shifting large amounts of data between processes. It is probably best to stick to using queues or pipes for communication between processes rather than using the lower level synchronization primitives from the :mod:`threading` module. Picklability Ensure that the arguments to the methods of proxies are picklable. Thread safety of proxies Do not use a proxy object from more than one thread unless you protect it with a lock. (There is never a problem with different processes using the *same* proxy.) Joining zombie processes On Unix when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or :func:`active_children` is called) all completed processes which have not yet been joined will be joined. Also calling a finished process's :meth:`Process.is_alive` will join the process. Even so it is probably good practice to explicitly join all the processes that you start. Better to inherit than pickle/unpickle On Windows many types from :mod:`multiprocessing` need to be picklable so that child processes can use them. However, one should generally avoid sending shared objects to other processes using pipes or queues. Instead you should arrange the program so that a process which needs access to a shared resource created elsewhere can inherit it from an ancestor process. Avoid terminating processes Using the :meth:`Process.terminate` method to stop a process is liable to cause any shared resources (such as locks, semaphores, pipes and queues) currently being used by the process to become broken or unavailable to other processes. Therefore it is probably best to only consider using :meth:`Process.terminate` on processes which never use any shared resources. Joining processes that use queues Bear in mind that a process that has put items in a queue will wait before terminating until all the buffered items are fed by the "feeder" thread to the underlying pipe. (The child process can call the :meth:`Queue.cancel_join_thread` method of the queue to avoid this behaviour.) This means that whenever you use a queue you need to make sure that all items which have been put on the queue will eventually be removed before the process is joined. Otherwise you cannot be sure that processes which have put items on the queue will terminate. Remember also that non-daemonic processes will be automatically be joined. An example which will deadlock is the following:: from multiprocessing import Process, Queue def f(q): q.put('X' * 1000000) if __name__ == '__main__': queue = Queue() p = Process(target=f, args=(queue,)) p.start() p.join() # this deadlocks obj = queue.get() A fix here would be to swap the last two lines round (or simply remove the ``p.join()`` line). Explicitly pass resources to child processes On Unix a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not be garbage collected in the parent process. This might be important if some resource is freed when the object is garbage collected in the parent process. So for instance :: from multiprocessing import Process, Lock def f(): ... do something using "lock" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f).start() should be rewritten as :: from multiprocessing import Process, Lock def f(l): ... do something using "l" ... if __name__ == '__main__': lock = Lock() for i in range(10): Process(target=f, args=(lock,)).start() Beware of replacing :data:`sys.stdin` with a "file like object" :mod:`multiprocessing` originally unconditionally called:: os.close(sys.stdin.fileno()) in the :meth:`multiprocessing.Process._bootstrap` method --- this resulted in issues with processes-in-processes. This has been changed to:: sys.stdin.close() sys.stdin = open(os.devnull) Which solves the fundamental issue of processes colliding with each other resulting in a bad file descriptor error, but introduces a potential danger to applications which replace :func:`sys.stdin` with a "file-like object" with output buffering. This danger is that if multiple processes call :func:`close()` on this file-like object, it could result in the same data being flushed to the object multiple times, resulting in corruption. If you write a file-like object and implement your own caching, you can make it fork-safe by storing the pid whenever you append to the cache, and discarding the cache when the pid changes. For example:: @property def cache(self): pid = os.getpid() if pid != self._pid: self._pid = pid self._cache = [] return self._cache For more information, see :issue:`5155`, :issue:`5313` and :issue:`5331` Windows ~~~~~~~ Since Windows lacks :func:`os.fork` it has a few extra restrictions: More picklability Ensure that all arguments to :meth:`Process.__init__` are picklable. This means, in particular, that bound or unbound methods cannot be used directly as the ``target`` argument on Windows --- just define a function and use that instead. Also, if you subclass :class:`Process` then make sure that instances will be picklable when the :meth:`Process.start` method is called. Global variables Bear in mind that if code run in a child process tries to access a global variable, then the value it sees (if any) may not be the same as the value in the parent process at the time that :meth:`Process.start` was called. However, global variables which are just module level constants cause no problems. Safe importing of main module Make sure that the main module can be safely imported by a new Python interpreter without causing unintended side effects (such a starting a new process). For example, under Windows running the following module would fail with a :exc:`RuntimeError`:: from multiprocessing import Process def foo(): print('hello') p = Process(target=foo) p.start() Instead one should protect the "entry point" of the program by using ``if __name__ == '__main__':`` as follows:: from multiprocessing import Process, freeze_support def foo(): print('hello') if __name__ == '__main__': freeze_support() p = Process(target=foo) p.start() (The ``freeze_support()`` line can be omitted if the program will be run normally instead of frozen.) This allows the newly spawned Python interpreter to safely import the module and then run the module's ``foo()`` function. Similar restrictions apply if a pool or manager is created in the main module. .. _multiprocessing-examples: Examples -------- Demonstration of how to create and use customized managers and proxies: .. literalinclude:: ../includes/mp_newtype.py :language: python3 Using :class:`Pool`: .. literalinclude:: ../includes/mp_pool.py :language: python3 Synchronization types like locks, conditions and queues: .. literalinclude:: ../includes/mp_synchronize.py :language: python3 An example showing how to use queues to feed tasks to a collection of worker processes and collect the results: .. literalinclude:: ../includes/mp_workers.py An example of how a pool of worker processes can each run a :class:`~http.server.SimpleHTTPRequestHandler` instance while sharing a single listening socket. .. literalinclude:: ../includes/mp_webserver.py Some simple benchmarks comparing :mod:`multiprocessing` with :mod:`threading`: .. literalinclude:: ../includes/mp_benchmarks.py billiard-3.3.0.15/funtests/0000755000076500000000000000000012276217622016017 5ustar asksolwheel00000000000000billiard-3.3.0.15/funtests/__init__.py0000644000076500000000000000012412270022117020110 0ustar asksolwheel00000000000000import os import sys sys.path.insert(0, os.pardir) sys.path.insert(0, os.getcwd()) billiard-3.3.0.15/funtests/setup.py0000644000076500000000000000270012270022117017513 0ustar asksolwheel00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- try: from setuptools import setup from setuptools.command.install import install except ImportError: from ez_setup import use_setuptools use_setuptools() from setuptools import setup # noqa from setuptools.command.install import install # noqa class no_install(install): def run(self, *args, **kwargs): import sys sys.stderr.write(""" ------------------------------------------------------- The billiard functional test suite cannot be installed. ------------------------------------------------------- But you can execute the tests by running the command: $ python setup.py test """) setup( name='billiard-funtests', version='DEV', description='Functional test suite for billiard', author='Ask Solem', author_email='ask@celeryproject.org', url='http://github.com/celery/billiard', platforms=['any'], packages=[], data_files=[], zip_safe=False, cmdclass={'install': no_install}, test_suite='nose.collector', build_requires=[ 'nose', 'nose-cover3', 'unittest2', 'coverage>=3.0', ], classifiers=[ 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: C' 'License :: OSI Approved :: BSD License', 'Intended Audience :: Developers', ], long_description='Do not install this package', ) billiard-3.3.0.15/funtests/tests/0000755000076500000000000000000012276217622017161 5ustar asksolwheel00000000000000billiard-3.3.0.15/funtests/tests/__init__.py0000644000076500000000000000022512270022117021254 0ustar asksolwheel00000000000000import os import sys sys.path.insert(0, os.path.join(os.getcwd(), os.pardir)) print(sys.path[0]) sys.path.insert(0, os.getcwd()) print(sys.path[0]) billiard-3.3.0.15/funtests/tests/test_multiprocessing.py0000644000076500000000000016717012270022117024020 0ustar asksolwheel00000000000000#!/usr/bin/env python from __future__ import absolute_import # # Unit tests for the multiprocessing package # import unittest import Queue import time import sys import os import gc import array import random import logging from nose import SkipTest from test import test_support from StringIO import StringIO try: from billiard._ext import _billiard except ImportError as exc: raise SkipTest(exc) # import threading after _billiard to raise a more revelant error # message: "No module named _billiard". _billiard is not compiled # without thread support. import threading # Work around broken sem_open implementations try: import billiard.synchronize except ImportError as exc: raise SkipTest(exc) import billiard.dummy import billiard.connection import billiard.managers import billiard.heap import billiard.pool from billiard import util from billiard.compat import bytes latin = str # Constants LOG_LEVEL = util.SUBWARNING DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_billiard, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") # Some tests require ctypes try: from ctypes import Structure, c_int, c_double except ImportError: Structure = object c_int = c_double = None try: from ctypes import Value except ImportError: Value = None try: from ctypes import copy as ctypes_copy except ImportError: ctypes_copy = None class TimingWrapper(object): """Creates a wrapper for a function which records the time it takes to finish""" def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.time() try: return self.func(*args, **kwds) finally: self.elapsed = time.time() - t class BaseTestCase(object): """Base class for test cases""" ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) def get_value(self): """Return the value of a semaphore""" try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError class _TestProcesses(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': return current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def _test(self, q, *args, **kwds): current = self.current_process() q.put(args) q.put(kwds) q.put(current.name) if self.TYPE != 'threads': q.put(bytes(current.authkey, 'ascii')) q.put(current.pid) def test_process(self): q = self.Queue(1) e = self.Event() # noqa args = (q, 1, 2) kwargs = {'hello': 23, 'bye': 2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEquals(p.authkey, current.authkey) self.assertEquals(p.is_alive(), False) self.assertEquals(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEquals(p.exitcode, None) self.assertEquals(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEquals(q.get(), args[1:]) self.assertEquals(q.get(), kwargs) self.assertEquals(q.get(), p.name) if self.TYPE != 'threads': self.assertEquals(q.get(), current.authkey) self.assertEquals(q.get(), p.pid) p.join() self.assertEquals(p.exitcode, 0) self.assertEquals(p.is_alive(), False) self.assertNotIn(p, self.active_children()) def _test_terminate(self): time.sleep(1000) def test_terminate(self): if self.TYPE == 'threads': return p = self.Process(target=self._test_terminate) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) p.terminate() join = TimingWrapper(p.join) self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() # XXX sometimes get p.exitcode == 0 on Windows ... #self.assertEqual(p.exitcode, -signal.SIGTERM) def test_cpu_count(self): try: cpus = billiard.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) def _test_recursion(self, wconn, id): __import__('billiard.forking') wconn.send(id) if len(id) < 2: for i in range(2): p = self.Process( target=self._test_recursion, args=(wconn, id + [i]) ) p.start() p.join() def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) class _UpperCaser(billiard.Process): def __init__(self): billiard.Process.__init__(self) self.child_conn, self.parent_conn = billiard.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): def _test_put(self, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(Queue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() def _test_get(self, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(Queue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(Queue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(Queue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() def _test_fork(self, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(Queue.Empty, queue.get, False) p.join() def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: return q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) def _test_task_done(self, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'): self.skipTest("requires 'queue.task_done()' method") workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in xrange(4)] for p in workers: p.start() for i in xrange(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': return sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): def f(self, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() # wait for them all to sleep for i in xrange(6): sleeping.acquire() # check they have all timed out for i in xrange(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() # wait for them to all sleep for i in xrange(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() cond.notify_all() cond.release() # check they have all woken time.sleep(DELTA) self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, None) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) class _TestEvent(BaseTestCase): def _test_event(self, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporaily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) self.Process(target=self._test_event, args=(event,)).start() self.assertEqual(wait(), True) class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('c', latin('x'), latin('y')) ] def _test(self, values): for sv, cv in zip(values, self.codes_values): sv.value = cv[2] @unittest.skipIf(c_int is None, "requires _ctypes") def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawvalue(self): self.test_value(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() # noqa obj1 = val1.get_obj() # noqa val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() # noqa obj2 = val2.get_obj() # noqa lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() # noqa obj3 = val3.get_obj() # noqa self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) def f(self, seq): for i in range(1, len(seq)): seq[i] += seq[i - 1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', range(10)) lock1 = arr1.get_lock() # noqa obj1 = arr1.get_obj() # noqa arr2 = self.Array('i', range(10), lock=None) lock2 = arr2.get_lock() # noqa obj2 = arr2.get_obj() # noqa lock = self.Lock() arr3 = self.Array('i', range(10), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() # noqa self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(range(10)) self.assertEqual(a[:], range(10)) b = self.list() self.assertEqual(b[:], []) b.extend(range(5)) self.assertEqual(b[:], range(5)) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2, 3, 4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], range(10)) d = [a, b] e = self.list(d) self.assertEqual( e[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']]) def test_dict(self): d = self.dict() indices = range(65, 70) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((j, chr(j)) for j in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(z) for z in indices]) self.assertEqual(sorted(d.items()), [(x, chr(x)) for x in indices]) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) def sqr(x, wait=0.0): time.sleep(wait) return x * x class _TestPool(BaseTestCase): def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x': 3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10))) self.assertEqual(pmap(sqr, range(100), chunksize=20), map(sqr, range(100))) def test_map_chunksize(self): try: self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) except billiard.TimeoutError: self.fail("pool.map_async with chunksize stalled on null list") def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2)) get = TimingWrapper(res.get) self.assertRaises(billiard.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, range(10)) self.assertEqual(list(it), map(sqr, range(10))) it = self.pool.imap(sqr, range(10)) for i in range(10): self.assertEqual(it.next(), i * i) self.assertRaises(StopIteration, it.next) it = self.pool.imap(sqr, range(1000), chunksize=100) for i in range(1000): self.assertEqual(it.next(), i * i) self.assertRaises(StopIteration, it.next) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, range(1000)) self.assertEqual(sorted(it), map(sqr, range(1000))) it = self.pool.imap_unordered(sqr, range(1000), chunksize=53) self.assertEqual(sorted(it), map(sqr, range(1000))) def test_make_pool(self): p = billiard.Pool(3) self.assertEqual(3, len(p._pool)) p.close() p.join() def test_terminate(self): if self.TYPE == 'manager': # On Unix a forked process increfs each shared object to # which its parent process held a reference. If the # forked process gets terminated then there is likely to # be a reference leak. So to prevent # _TestZZZNumberOfObjects from failing we skip this test # when using a manager. return self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() self.assertTrue(join.elapsed < 0.2) class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): p = billiard.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive countdown = 5 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [worker.pid for worker in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() class _TestZZZNumberOfObjects(BaseTestCase): # Test that manager has expected number of shared objects left # Because test cases are sorted alphabetically, this one will get # run after all the other tests for the manager. It tests that # there have been no "reference leaks" for the manager's shared # objects. Note the comment in _TestPool.test_terminate(). ALLOWED_TYPES = ('manager',) def test_number_of_objects(self): EXPECTED_NUMBER = 1 # the pool object is still alive billiard.active_children() # discard dead process objs gc.collect() # do garbage collection refs = self.manager._number_of_objects() debug_info = self.manager._debug_info() if refs != EXPECTED_NUMBER: print(self.manager._debug_info()) print(debug_info) self.assertEqual(refs, EXPECTED_NUMBER) # Test of creating a customized manager class from billiard.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in xrange(10): yield i * i class IteratorProxy(BaseProxy): _exposed_ = ('next', '__next__') def __iter__(self): return self def next(self): return self._callmethod('next') def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i * i for i in range(10)]) manager.shutdown() _queue = Queue.Queue() # Test of connecting to a remote server and using xmlrpclib for serialization def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def _putter(self, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() queue.put(('hello world', None, True, 2.25)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=('localhost', 0), authkey=authkey, serializer=SERIALIZER ) manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple self.assertEqual(queue.get(), ['hello world', None, True, 2.25]) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue manager.shutdown() class _TestManagerRestart(BaseTestCase): def _putter(self, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=('localhost', 0), authkey=authkey, serializer=SERIALIZER) addr = manager.get_server().address manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) manager.start() manager.shutdown() SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def _echo(self, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', range(4)) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0] * 10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0] * 10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except billiard.BufferTooShort as exc: self.assertEqual(exc.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(IOError, reader.send, 2) self.assertRaises(IOError, writer.recv) self.assertRaises(IOError, writer.poll) def test_spawn_close(self): # We test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': return msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7 + 8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def _test(self, address): conn = self.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() ''' class _TestPicklingConnections(BaseTestCase): """Test of sending connection and socket objects between processes""" ALLOWED_TYPES = ('processes',) def _listener(self, conn, families): for fam in families: l = self.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) if self.TYPE == 'processes': l = socket.socket() l.bind(('localhost', 0)) conn.send(l.getsockname()) l.listen(1) new_conn, addr = l.accept() conn.send(new_conn) conn.recv() def _remote(self, conn): for (address, msg) in iter(conn.recv, None): client = self.connection.Client(address) client.send(msg.upper()) client.close() if self.TYPE == 'processes': address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): try: billiard.allow_connection_pickling() except ImportError: return families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) if self.TYPE == 'processes': msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) if hasattr(socket, 'fromfd'): new_conn = lconn.recv() self.assertEqual(new_conn.recv(100), msg.upper()) else: # XXX On Windows with Py2.6 need to backport fromfd() discard = lconn.recv_bytes() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() ''' class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # create and destroy lots of blocks of different sizes for i in xrange(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = billiard.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] # get the heap object heap = billiard.heap.BufferWrapper._heap # verify the state of the heap all = [] occupied = 0 for L in heap._len_to_seq.values(): for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop - start, 'free')) for arena, start, stop in heap._allocated_blocks: all.append((heap._arenas.index(arena), start, stop, stop - start, 'occupied')) occupied += stop - start all.sort() for i in range(len(all) - 1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i + 1][:3] self.assertTrue((arena != narena and nstart == 0) or (stop == nstart)) class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes', ) def _double(self, x, y, foo, arr, string): x.value *= 2 y.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 @unittest.skipIf(Value is None, "requires ctypes.Value") def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0 / 3.0, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', range(10), lock=lock) string = self.Array('c', 20, lock=lock) string.value = 'hello' p = self.Process(target=self._double, args=(x, y, foo, arr, string)) p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0 / 3.0) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i * 2) self.assertEqual(string.value, latin('hellohello')) @unittest.skipIf(Value is None, "requires ctypes.Value") def test_synchronize(self): self.test_sharedctypes(lock=True) @unittest.skipIf(ctypes_copy is None, "requires ctypes.copy") def test_copy(self): foo = _Foo(2, 5.0) bar = ctypes_copy(foo) foo.x = 0 foo.y = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def _test_finalize(self, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call mutliprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) class _TestImportStar(BaseTestCase): """Test that from ... import * works for each module""" ALLOWED_TYPES = ('processes',) def test_import(self): modules = [ 'billiard', 'billiard.connection', 'billiard.heap', 'billiard.managers', 'billiard.pool', 'billiard.process', 'billiard.reduction', 'billiard.synchronize', 'billiard.util' ] if c_int is not None: # This module requires _ctypes modules.append('billiard.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] for attr in getattr(mod, '__all__', ()): self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) class _TestLogging(BaseTestCase): """Quick test that logging works -- does not test logging output""" ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = billiard.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) def _test_level(self, conn): logger = billiard.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = billiard.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = billiard.Pipe(duplex=False) logger.setLevel(LEVEL1) self.Process(target=self._test_level, args=(writer,)).start() self.assertEqual(LEVEL1, reader.recv()) logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) self.Process(target=self._test_level, args=(writer,)).start() self.assertEqual(LEVEL2, reader.recv()) root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == billiard.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'billiard.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = _billiard.Connection(44977608) self.assertRaises(IOError, conn.poll) self.assertRaises(IOError, _billiard.Connection, -1) def get_attributes(Source, names): d = {} for name in names: obj = getattr(Source, name) if type(obj) == type(get_attributes): obj = staticmethod(obj) d[name] = obj return d def create_test_cases(Mixin, type): result = {} glob = globals() Type = type.capitalize() for name in glob.keys(): if name.startswith('_Test'): base = glob[name] if type in base.ALLOWED_TYPES: newname = 'With' + Type + name[1:] class Temp(base, unittest.TestCase, Mixin): pass result[newname] = Temp Temp.__name__ = newname Temp.__module__ = Mixin.__module__ return result class ProcessesMixin(object): TYPE = 'processes' Process = billiard.Process locals().update(get_attributes(billiard, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'RawValue', 'RawArray', 'current_process', 'active_children', 'Pipe', 'connection', 'JoinableQueue' ))) testcases_processes = create_test_cases(ProcessesMixin, type='processes') globals().update(testcases_processes) class ManagerMixin(object): TYPE = 'manager' Process = billiard.Process manager = object.__new__(billiard.managers.SyncManager) locals().update(get_attributes(manager, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'list', 'dict', 'Namespace', 'JoinableQueue' ))) testcases_manager = create_test_cases(ManagerMixin, type='manager') globals().update(testcases_manager) class ThreadsMixin(object): TYPE = 'threads' Process = billiard.dummy.Process locals().update(get_attributes(billiard.dummy, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'current_process', 'active_children', 'Pipe', 'connection', 'dict', 'list', 'Namespace', 'JoinableQueue' ))) testcases_threads = create_test_cases(ThreadsMixin, type='threads') globals().update(testcases_threads) class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return bytes('something bogus') def send_bytes(self, data): pass self.assertRaises(billiard.AuthenticationError, billiard.connection.deliver_challenge, _FakeConnection(), bytes('abc')) def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return billiard.connection.CHALLENGE elif self.count == 2: return bytes('something bogus') return bytes('') def send_bytes(self, data): pass self.assertRaises(billiard.AuthenticationError, billiard.connection.answer_challenge, _FakeConnection(), bytes('abc')) def initializer(ns): ns.test += 1 class TestInitializers(unittest.TestCase): """Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 """ def setUp(self): self.mgr = billiard.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() def test_manager_initializer(self): m = billiard.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() def test_pool_initializer(self): self.assertRaises(TypeError, billiard.Pool, initializer=1) p = billiard.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) def _ThisSubProcess(q): try: q.get(block=False) except Queue.Empty: pass def _TestProcess(q): """Issue 5155, 5313, 5331: Test process in processes Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior """ queue = billiard.Queue() subProc = billiard.Process(target=_ThisSubProcess, args=(queue,)) subProc.start() subProc.join() def _afunc(x): return x * x def pool_in_process(): pool = billiard.Pool(processes=4) pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): queue = billiard.Queue() proc = billiard.Process(target=_TestProcess, args=(queue,)) proc.start() proc.join() def test_pool_in_process(self): p = billiard.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = StringIO() flike = _file_like(sio) flike.write('foo') proc = billiard.Process(target=lambda: flike.flush()) self.assertTrue(proc) flike.flush() assert sio.getvalue() == 'foo' testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, TestStdinBadfiledescriptor] def test_main(run=None): if sys.platform.startswith("linux"): try: billiard.RLock() except OSError: raise SkipTest("OSError raises on RLock creation, see issue 3111!") if run is None: from test.test_support import run_unittest as run util.get_temp_dir() # creates temp directory for use by all processes billiard.get_logger().setLevel(LOG_LEVEL) ProcessesMixin.pool = billiard.Pool(4) ThreadsMixin.pool = billiard.dummy.Pool(4) ManagerMixin.manager.__init__() ManagerMixin.manager.start() ManagerMixin.pool = ManagerMixin.manager.Pool(4) testcases = ( sorted(testcases_processes.values(), key=lambda tc: tc.__name__) + sorted(testcases_threads.values(), key=lambda tc: tc.__name__) + sorted(testcases_manager.values(), key=lambda tc: tc.__name__) + testcases_other ) loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases) # (ncoghlan): Whether or not sys.exc_clear is executed by the threading # module during these tests is at least platform dependent and possibly # non-deterministic on any given platform. So we don't mind if the listed # warnings aren't actually raised. with test_support.check_py3k_warnings( (".+__(get|set)slice__ has been removed", DeprecationWarning), (r"sys.exc_clear\(\) not supported", DeprecationWarning), quiet=True): run(suite) ThreadsMixin.pool.terminate() ProcessesMixin.pool.terminate() ManagerMixin.pool.terminate() ManagerMixin.manager.shutdown() del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool def main(): test_main(unittest.TextTestRunner(verbosity=2).run) if __name__ == '__main__': main() billiard-3.3.0.15/INSTALL.txt0000644000076500000000000000516012270022117016000 0ustar asksolwheel00000000000000.. default-role:: literal ================================ Installation of multiprocessing ================================ Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you must install the `ctypes` package (which comes automatically with Python 2.5). Users of Python 2.4 on Windows also need to install the `pywin32` package. On Unix It's highly recommended to use Python 2.5.3 (not yet released) or apply the ``fork-thread-patch-2`` patch from `Issue 1683 http://bugs.python.org/issue1683`_. Windows binary builds for Python 2.4 and Python 2.5 are available at http://pypi.python.org/pypi/multiprocessing Python 2.6 and newer versions already come with multiprocessing. Although the stand alone variant of the multiprocessing package is kept compatible with 2.6, you mustn't install it with Python 2.6. Otherwise, if you have the correct C compiler setup then the source distribution can be installed the usual way:: python setup.py install It should not be necessary to do any editing of `setup.py` if you are using Windows, Mac OS X or Linux. On other unices it may be necessary to modify the values of the `macros` dictionary or `libraries` list. The section to modify reads :: else: macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=1 ) libraries = ['rt'] More details can be found in the comments in `setup.py`. Note that if you use `HAVE_SEM_OPEN=0` then support for posix semaphores will not been compiled in, and then many of the functions in the `processing` namespace like `Lock()`, `Queue()` or will not be available. However, one can still create a manager using `manager = processing.Manager()` and then do `lock = manager.Lock()` etc. Running tests ------------- To run the test scripts using Python 2.5 do :: python -m multiprocessing.tests and on Python 2.4 do :: python -c "from multiprocessing.tests import main; main()" The sources also come with a Makefile. To run the unit tests with the Makefile using Python 2.5 do :: make test using another version of Python do :: make test PYTHON=python2.4 This will run a number of test scripts using both processes and threads. Running examples ---------------- The make target `examples` runs several example scripts. Building docs ------------- To build the standalone documentation you need Sphinx 0.5 and setuptools 0.6c9 or newer. Both are available at http://pypi.python.org/. With setuptools installed, do :: sudo easy_install-2.5 "Sphinx>=0.5" make doc The docs end up in ``build/sphinx/builder_name``. billiard-3.3.0.15/LICENSE.txt0000644000076500000000000000271312270022117015755 0ustar asksolwheel00000000000000Copyright (c) 2006-2008, R Oudkerk and Contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. billiard-3.3.0.15/Makefile0000644000076500000000000000205012270022117015564 0ustar asksolwheel00000000000000PYTHON?=python2.5 PYTHONPATH="Lib/" RUNPYTHON=PYTHONPATH=$(PYTHONPATH) $(PYTHON) BUILDER="html" .PHONY=all inplace test clean realclean sdist examples doc all: inplace inplace: clean $(PYTHON) setup.py build_ext -i test: inplace $(RUNPYTHON) -tt -c "from multiprocessing.tests import main; main()" clean: find Lib/ \( -name '*.py[co]' -or -name '*.so' \) -exec rm {} \; rm -rf build/sphinx realclean: clean find . \( -name '*~' -or -name '*.bak' -or -name '*.tmp' \) -exec rm {} \; rm -f MANIFEST rm -rf multiprocessing.egg-info rm -rf build/ rm -rf dist/ sdist: realclean $(PYTHON) setup.py sdist --format=gztar $(PYTHON) setup.py sdist --format=zip examples: inplace @echo -n "\n" @for EXAMPLE in distributing newtype pool synchronize benchmarks workers; do \ echo "*** Running example mp_$${EXAMPLE}.py"; \ $(RUNPYTHON) Doc/includes/mp_$${EXAMPLE}.py || exit 1; \ echo -n "\n***********************\n\n"; \ done doc: mkdir -p Doc/static Doc/templates $(PYTHON) setup.py build_sphinx --builder=$(BUILDER) \ --source-dir=Doc/ billiard-3.3.0.15/MANIFEST.in0000644000076500000000000000034012270022117015662 0ustar asksolwheel00000000000000include *.py include *.txt include *.rst include Makefile recursive-include Lib *.py recursive-include Modules *.c *.h recursive-include Doc *.rst *.py recursive-include funtests *.py recursive-include requirements *.txt billiard-3.3.0.15/Modules/0000755000076500000000000000000012276217622015554 5ustar asksolwheel00000000000000billiard-3.3.0.15/Modules/_billiard/0000755000076500000000000000000012276217622017475 5ustar asksolwheel00000000000000billiard-3.3.0.15/Modules/_billiard/connection.h0000644000076500000000000004432612270022117022001 0ustar asksolwheel00000000000000/* * Definition of a `Connection` type. * Used by `socket_connection.c` and `pipe_connection.c`. * * connection.h * * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt */ #ifndef CONNECTION_H #define CONNECTION_H /* * Read/write flags */ #define READABLE 1 #define WRITABLE 2 #define CHECK_READABLE(self) \ if (!(self->flags & READABLE)) { \ PyErr_SetString(PyExc_IOError, "connection is write-only"); \ return NULL; \ } #define CHECK_WRITABLE(self) \ if (!(self->flags & WRITABLE)) { \ PyErr_SetString(PyExc_IOError, "connection is read-only"); \ return NULL; \ } /* * Externally implemented functions */ extern void _Billiard_setblocking(int fd, int blocking); extern ssize_t _Billiard_conn_send_offset(HANDLE fd, char *string, Py_ssize_t len, Py_ssize_t offset); /* * Allocation and deallocation */ static PyObject * Billiard_connection_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { BilliardConnectionObject *self; HANDLE handle; BOOL readable = TRUE, writable = TRUE; static char *kwlist[] = {"handle", "readable", "writable", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, F_HANDLE "|ii", kwlist, &handle, &readable, &writable)) return NULL; if (handle == INVALID_HANDLE_VALUE || (Py_ssize_t)handle < 0) { PyErr_Format(PyExc_IOError, "invalid handle %zd", (Py_ssize_t)handle); return NULL; } if (!readable && !writable) { PyErr_SetString(PyExc_ValueError, "either readable or writable must be true"); return NULL; } self = PyObject_New(BilliardConnectionObject, type); if (self == NULL) return NULL; self->weakreflist = NULL; self->handle = handle; self->flags = 0; if (readable) self->flags |= READABLE; if (writable) self->flags |= WRITABLE; assert(self->flags >= 1 && self->flags <= 3); return (PyObject*)self; } static void Billiard_connection_dealloc(BilliardConnectionObject* self) { if (self->weakreflist != NULL) PyObject_ClearWeakRefs((PyObject*)self); if (self->handle != INVALID_HANDLE_VALUE) { Py_BEGIN_ALLOW_THREADS CLOSE(self->handle); Py_END_ALLOW_THREADS } PyObject_Del(self); } /* * Functions for transferring buffers */ static PyObject * Billiard_connection_sendbytes(BilliardConnectionObject *self, PyObject *args) { char *buffer; Py_ssize_t length, offset=0, size=PY_SSIZE_T_MIN; int res; if (!PyArg_ParseTuple(args, F_RBUFFER "#|" F_PY_SSIZE_T F_PY_SSIZE_T, &buffer, &length, &offset, &size)) return NULL; CHECK_WRITABLE(self); if (offset < 0) { PyErr_SetString(PyExc_ValueError, "offset is negative"); return NULL; } if (length < offset) { PyErr_SetString(PyExc_ValueError, "buffer length < offset"); return NULL; } if (size == PY_SSIZE_T_MIN) { size = length - offset; } else { if (size < 0) { PyErr_SetString(PyExc_ValueError, "size is negative"); return NULL; } if (offset + size > length) { PyErr_SetString(PyExc_ValueError, "buffer length < offset + size"); return NULL; } } res = Billiard_conn_send_string(self, buffer + offset, size); if (res < 0) { if (PyErr_Occurred()) return NULL; else return Billiard_SetError(PyExc_IOError, res); } Py_RETURN_NONE; } static PyObject * Billiard_connection_recvbytes(BilliardConnectionObject *self, PyObject *args) { char *freeme = NULL; Py_ssize_t res, maxlength = PY_SSIZE_T_MAX; PyObject *result = NULL; if (!PyArg_ParseTuple(args, "|" F_PY_SSIZE_T, &maxlength)) return NULL; CHECK_READABLE(self); if (maxlength < 0) { PyErr_SetString(PyExc_ValueError, "maxlength < 0"); return NULL; } res = Billiard_conn_recv_string(self, self->buffer, CONNECTION_BUFFER_SIZE, &freeme, maxlength); if (res < 0) { if (res == MP_BAD_MESSAGE_LENGTH) { if ((self->flags & WRITABLE) == 0) { Py_BEGIN_ALLOW_THREADS CLOSE(self->handle); Py_END_ALLOW_THREADS self->handle = INVALID_HANDLE_VALUE; } else { self->flags = WRITABLE; } } Billiard_SetError(PyExc_IOError, res); } else { if (freeme == NULL) { result = PyString_FromStringAndSize(self->buffer, res); } else { result = PyString_FromStringAndSize(freeme, res); PyMem_Free(freeme); } } return result; } #ifdef HAS_NEW_PY_BUFFER static PyObject * Billiard_connection_recvbytes_into(BilliardConnectionObject *self, PyObject *args) { char *freeme = NULL, *buffer = NULL; Py_ssize_t res, length, offset = 0; PyObject *result = NULL; Py_buffer pbuf; CHECK_READABLE(self); if (!PyArg_ParseTuple(args, "w*|" F_PY_SSIZE_T, &pbuf, &offset)) return NULL; buffer = pbuf.buf; length = pbuf.len; if (offset < 0) { PyErr_SetString(PyExc_ValueError, "negative offset"); goto _error; } if (offset > length) { PyErr_SetString(PyExc_ValueError, "offset too large"); goto _error; } res = Billiard_conn_recv_string(self, buffer+offset, length-offset, &freeme, PY_SSIZE_T_MAX); if (res < 0) { if (res == MP_BAD_MESSAGE_LENGTH) { if ((self->flags & WRITABLE) == 0) { Py_BEGIN_ALLOW_THREADS CLOSE(self->handle); Py_END_ALLOW_THREADS self->handle = INVALID_HANDLE_VALUE; } else { self->flags = WRITABLE; } } Billiard_SetError(PyExc_IOError, res); } else { if (freeme == NULL) { result = PyInt_FromSsize_t(res); } else { result = PyObject_CallFunction(Billiard_BufferTooShort, F_RBUFFER "#", freeme, res); PyMem_Free(freeme); if (result) { PyErr_SetObject(Billiard_BufferTooShort, result); Py_DECREF(result); } goto _error; } } _cleanup: PyBuffer_Release(&pbuf); return result; _error: result = NULL; goto _cleanup; } # else /* old buffer protocol */ static PyObject * Billiard_connection_recvbytes_into(BilliardConnectionObject *self, PyObject *args) { char *freeme = NULL, *buffer = NULL; Py_ssize_t length = 0, res, offset = 0; PyObject *result = NULL; CHECK_READABLE(self); if (!PyArg_ParseTuple(args, "w#|", F_PY_SSIZE_T, &buffer, &length, &offset)) return NULL; if (offset < 0) { PyErr_SetString(PyExc_ValueError, "negative offset"); goto _error; } if (offset > 0) { PyErr_SetString(PyExc_ValueError, "offset out of bounds"); goto _error; } res = Billiard_conn_recv_string(self, buffer+offset, length-offset, &freeme, PY_SSIZE_T_MAX); if (res < 0) { if (res == MP_BAD_MESSAGE_LENGTH) { if ((self->flags & WRITABLE) == 0) { Py_BEGIN_ALLOW_THREADS CLOSE(self->handle); Py_END_ALLOW_THREADS self->handle = INVALID_HANDLE_VALUE; } else { self->flags = WRITABLE; } } Billiard_SetError(PyExc_IOError, res); } else { if (freeme == NULL) { result = PyInt_FromSsize_t(res); } else { result = PyObject_CallFunction(Billiard_BufferTooShort, F_RBUFFER "#", freeme, res); PyMem_Free(freeme); if (result) { PyErr_SetObject(Billiard_BufferTooShort, result); Py_DECREF(result); } goto _error; } } _cleanup: return result; _error: result = NULL; goto _cleanup; } # endif /* buffer */ /* * Functions for transferring objects */ static PyObject * Billiard_connection_send_offset(BilliardConnectionObject *self, PyObject *args) { Py_buffer view; Py_ssize_t len = 0; Py_ssize_t offset = 0; ssize_t written = 0; char *buf = NULL; if (!PyArg_ParseTuple(args, "s*n", &view, &offset)) return NULL; len = view.len; buf = view.buf; // CHECK_WRITABLE(self); if (!(self->flags & WRITABLE)) { PyErr_SetString(PyExc_IOError, "connection is read-only"); \ goto bail; } if (len < 0 || len == 0) { errno = EINVAL; PyErr_SetFromErrno(PyExc_OSError); goto bail; } written = _Billiard_conn_send_offset(self->handle, buf, (size_t)len, offset); if (written < 0) { Billiard_SetError(NULL, MP_SOCKET_ERROR); goto bail; } PyBuffer_Release(&view); return PyInt_FromSsize_t((Py_ssize_t)written); bail: PyBuffer_Release(&view); return NULL; } static PyObject * Billiard_connection_send_obj(BilliardConnectionObject *self, PyObject *obj) { char *buffer; int res; Py_ssize_t length; PyObject *pickled_string = NULL; CHECK_WRITABLE(self); pickled_string = PyObject_CallFunctionObjArgs( Billiard_pickle_dumps, obj, Billiard_pickle_protocol, NULL ); if (!pickled_string) goto failure; if (PyString_AsStringAndSize(pickled_string, &buffer, &length) < 0) goto failure; res = Billiard_conn_send_string(self, buffer, (int)length); if (res != MP_SUCCESS) { Billiard_SetError(NULL, res); goto failure; } Py_XDECREF(pickled_string); Py_RETURN_NONE; failure: Py_XDECREF(pickled_string); return NULL; } static PyObject * Billiard_connection_setblocking(BilliardConnectionObject *self, PyObject *arg) { _Billiard_setblocking((int)self->handle, PyInt_AS_LONG(arg)); Py_RETURN_NONE; } static PyObject * Billiard_connection_recv_payload(BilliardConnectionObject *self) { char *freeme = NULL; Py_ssize_t res; PyObject *view = NULL; CHECK_READABLE(self); res = Billiard_conn_recv_string(self, self->buffer, CONNECTION_BUFFER_SIZE, &freeme, PY_SSIZE_T_MAX); if (res < 0) { if (res == MP_BAD_MESSAGE_LENGTH) { if ((self->flags & WRITABLE) == 0) { Py_BEGIN_ALLOW_THREADS CLOSE(self->handle); Py_END_ALLOW_THREADS self->handle = INVALID_HANDLE_VALUE; } else { self->flags = WRITABLE; } } Billiard_SetError(PyExc_IOError, res); goto error; } else { if (freeme == NULL) { view = PyBuffer_FromMemory(self->buffer, res); } else { view = PyString_FromStringAndSize(freeme, res); PyMem_Free(freeme); } } //Py_XDECREF(view); return view; error: return NULL; } static PyObject * Billiard_connection_recv_obj(BilliardConnectionObject *self) { char *freeme = NULL; Py_ssize_t res; PyObject *temp = NULL, *result = NULL; CHECK_READABLE(self); res = Billiard_conn_recv_string(self, self->buffer, CONNECTION_BUFFER_SIZE, &freeme, PY_SSIZE_T_MAX); if (res < 0) { if (res == MP_BAD_MESSAGE_LENGTH) { if ((self->flags & WRITABLE) == 0) { Py_BEGIN_ALLOW_THREADS CLOSE(self->handle); Py_END_ALLOW_THREADS self->handle = INVALID_HANDLE_VALUE; } else { self->flags = WRITABLE; } } Billiard_SetError(PyExc_IOError, res); } else { if (freeme == NULL) { temp = PyString_FromStringAndSize(self->buffer, res); } else { temp = PyString_FromStringAndSize(freeme, res); PyMem_Free(freeme); } } if (temp) result = PyObject_CallFunctionObjArgs(Billiard_pickle_loads, temp, NULL); Py_XDECREF(temp); return result; } /* * Other functions */ static PyObject * Billiard_connection_poll(BilliardConnectionObject *self, PyObject *args) { PyObject *timeout_obj = NULL; double timeout = 0.0; int res; CHECK_READABLE(self); if (!PyArg_ParseTuple(args, "|O", &timeout_obj)) return NULL; if (timeout_obj == NULL) { timeout = 0.0; } else if (timeout_obj == Py_None) { timeout = -1.0; /* block forever */ } else { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; if (timeout < 0.0) timeout = 0.0; } Py_BEGIN_ALLOW_THREADS res = Billiard_conn_poll(self, timeout, _save); Py_END_ALLOW_THREADS switch (res) { case TRUE: Py_RETURN_TRUE; case FALSE: Py_RETURN_FALSE; default: return Billiard_SetError(PyExc_IOError, res); } } static PyObject * Billiard_connection_fileno(BilliardConnectionObject* self) { if (self->handle == INVALID_HANDLE_VALUE) { PyErr_SetString(PyExc_IOError, "handle is invalid"); return NULL; } return PyInt_FromLong((long)self->handle); } static PyObject * Billiard_connection_close(BilliardConnectionObject *self) { if (self->handle != INVALID_HANDLE_VALUE) { Py_BEGIN_ALLOW_THREADS CLOSE(self->handle); Py_END_ALLOW_THREADS self->handle = INVALID_HANDLE_VALUE; } Py_RETURN_NONE; } static PyObject * Billiard_connection_repr(BilliardConnectionObject *self) { static char *conn_type[] = {"read-only", "write-only", "read-write"}; assert(self->flags >= 1 && self->flags <= 3); return FROM_FORMAT("<%s %s, handle %zd>", conn_type[self->flags - 1], CONNECTION_NAME, (Py_ssize_t)self->handle); } /* * Getters and setters */ static PyObject * Billiard_connection_closed(BilliardConnectionObject *self, void *closure) { return PyBool_FromLong((long)(self->handle == INVALID_HANDLE_VALUE)); } static PyObject * Billiard_connection_readable(BilliardConnectionObject *self, void *closure) { return PyBool_FromLong((long)(self->flags & READABLE)); } static PyObject * Billiard_connection_writable(BilliardConnectionObject *self, void *closure) { return PyBool_FromLong((long)(self->flags & WRITABLE)); } /* * Tables */ static PyMethodDef Billiard_connection_methods[] = { {"send_bytes", (PyCFunction)Billiard_connection_sendbytes, METH_VARARGS, "send the byte data from a readable buffer-like object"}, {"recv_bytes", (PyCFunction)Billiard_connection_recvbytes, METH_VARARGS, "receive byte data as a string"}, {"recv_bytes_into",(PyCFunction)Billiard_connection_recvbytes_into,METH_VARARGS, "receive byte data into a writeable buffer-like object\n" "returns the number of bytes read"}, {"send", (PyCFunction)Billiard_connection_send_obj, METH_O, "send a (picklable) object"}, {"send_offset", (PyCFunction)Billiard_connection_send_offset, METH_VARARGS, "send string/buffer (non-blocking)"}, {"recv", (PyCFunction)Billiard_connection_recv_obj, METH_NOARGS, "receive a (picklable) object"}, {"setblocking",(PyCFunction)Billiard_connection_setblocking, METH_O, "set socket blocking/non-blocking"}, {"recv_payload", (PyCFunction)Billiard_connection_recv_payload, METH_NOARGS, "receive raw payload (not unpickled)"}, {"poll", (PyCFunction)Billiard_connection_poll, METH_VARARGS, "whether there is any input available to be read"}, {"fileno", (PyCFunction)Billiard_connection_fileno, METH_NOARGS, "file descriptor or handle of the connection"}, {"close", (PyCFunction)Billiard_connection_close, METH_NOARGS, "close the connection"}, {NULL} /* Sentinel */ }; static PyGetSetDef Billiard_connection_getset[] = { {"closed", (getter)Billiard_connection_closed, NULL, "True if the connection is closed", NULL}, {"readable", (getter)Billiard_connection_readable, NULL, "True if the connection is readable", NULL}, {"writable", (getter)Billiard_connection_writable, NULL, "True if the connection is writable", NULL}, {NULL} }; /* * Connection type */ PyDoc_STRVAR(Billiard_connection_doc, "Connection type whose constructor signature is\n\n" " Connection(handle, readable=True, writable=True).\n\n" "The constructor does *not* duplicate the handle."); PyTypeObject CONNECTION_TYPE = { PyVarObject_HEAD_INIT(NULL, 0) /* tp_name */ "_billiard." CONNECTION_NAME, /* tp_basicsize */ sizeof(BilliardConnectionObject), /* tp_itemsize */ 0, /* tp_dealloc */ (destructor)Billiard_connection_dealloc, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ (reprfunc)Billiard_connection_repr, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_WEAKREFS, /* tp_doc */ Billiard_connection_doc, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ offsetof(BilliardConnectionObject, weakreflist), /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ Billiard_connection_methods, /* tp_members */ 0, /* tp_getset */ Billiard_connection_getset, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ Billiard_connection_new, }; #endif /* CONNECTION_H */ billiard-3.3.0.15/Modules/_billiard/multiprocessing.c0000644000076500000000000002312712270022117023060 0ustar asksolwheel00000000000000/* * Extension module used by multiprocessing package * * multiprocessing.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocessing.h" #ifdef SCM_RIGHTS #define HAVE_FD_TRANSFER 1 #else #define HAVE_FD_TRANSFER 0 #endif PyObject *create_win32_namespace(void); PyObject *Billiard_pickle_dumps; PyObject *Billiard_pickle_loads; PyObject *Billiard_pickle_protocol; PyObject *Billiard_BufferTooShort; /* * Function which raises exceptions based on error codes */ PyObject * Billiard_SetError(PyObject *Type, int num) { switch (num) { case MP_SUCCESS: break; #ifdef MS_WINDOWS case MP_STANDARD_ERROR: if (Type == NULL) Type = PyExc_WindowsError; PyErr_SetExcFromWindowsErr(Type, 0); break; case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_WindowsError; PyErr_SetExcFromWindowsErr(Type, WSAGetLastError()); break; #else /* !MS_WINDOWS */ case MP_STANDARD_ERROR: case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetFromErrno(Type); break; #endif /* !MS_WINDOWS */ case MP_MEMORY_ERROR: PyErr_NoMemory(); break; case MP_END_OF_FILE: PyErr_SetNone(PyExc_EOFError); break; case MP_EARLY_END_OF_FILE: PyErr_SetString(PyExc_IOError, "got end of file during message"); break; case MP_BAD_MESSAGE_LENGTH: PyErr_SetString(PyExc_IOError, "bad message length"); break; case MP_EXCEPTION_HAS_BEEN_SET: break; default: PyErr_Format(PyExc_RuntimeError, "unkown error number %d", num); } return NULL; } /* * Windows only */ #ifdef MS_WINDOWS /* On Windows we set an event to signal Ctrl-C; compare with timemodule.c */ HANDLE sigint_event = NULL; static BOOL WINAPI ProcessingCtrlHandler(DWORD dwCtrlType) { SetEvent(sigint_event); return FALSE; } /* * Unix only */ #else /* !MS_WINDOWS */ #if HAVE_FD_TRANSFER /* Functions for transferring file descriptors between processes. Reimplements some of the functionality of the fdcred module at http://www.mca-ltd.com/resources/fdcred_1.tgz. */ static PyObject * Billiard_multiprocessing_sendfd(PyObject *self, PyObject *args) { int conn, fd, res; char dummy_char; char buf[CMSG_SPACE(sizeof(int))]; struct msghdr msg = {0}; struct iovec dummy_iov; struct cmsghdr *cmsg; if (!PyArg_ParseTuple(args, "ii", &conn, &fd)) return NULL; dummy_iov.iov_base = &dummy_char; dummy_iov.iov_len = 1; msg.msg_control = buf; msg.msg_controllen = sizeof(buf); msg.msg_iov = &dummy_iov; msg.msg_iovlen = 1; cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof(int)); msg.msg_controllen = cmsg->cmsg_len; *(int*)CMSG_DATA(cmsg) = fd; Py_BEGIN_ALLOW_THREADS res = sendmsg(conn, &msg, 0); Py_END_ALLOW_THREADS if (res < 0) return PyErr_SetFromErrno(PyExc_OSError); Py_RETURN_NONE; } static PyObject * Billiard_multiprocessing_recvfd(PyObject *self, PyObject *args) { int conn, fd, res; char dummy_char; char buf[CMSG_SPACE(sizeof(int))]; struct msghdr msg = {0}; struct iovec dummy_iov; struct cmsghdr *cmsg; if (!PyArg_ParseTuple(args, "i", &conn)) return NULL; dummy_iov.iov_base = &dummy_char; dummy_iov.iov_len = 1; msg.msg_control = buf; msg.msg_controllen = sizeof(buf); msg.msg_iov = &dummy_iov; msg.msg_iovlen = 1; cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof(int)); msg.msg_controllen = cmsg->cmsg_len; Py_BEGIN_ALLOW_THREADS res = recvmsg(conn, &msg, 0); Py_END_ALLOW_THREADS if (res < 0) return PyErr_SetFromErrno(PyExc_OSError); fd = *(int*)CMSG_DATA(cmsg); return Py_BuildValue("i", fd); } #endif /* HAVE_FD_TRANSFER */ #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject* Billiard_multiprocessing_address_of_buffer(PyObject *self, PyObject *obj) { void *buffer; Py_ssize_t buffer_len; if (PyObject_AsWriteBuffer(obj, &buffer, &buffer_len) < 0) return NULL; return Py_BuildValue("N" F_PY_SSIZE_T, PyLong_FromVoidPtr(buffer), buffer_len); } #if !defined(MS_WINDOWS) static PyObject * Billiard_read(PyObject *self, PyObject *args) { int fd; Py_buffer view; Py_ssize_t buflen, recvlen = 0; char *buf = NULL; Py_ssize_t n = 0; if (!PyArg_ParseTuple(args, "iw*|n", &fd, &view, &recvlen)) return NULL; buflen = view.len; buf = view.buf; if (recvlen < 0) { PyBuffer_Release(&view); PyErr_SetString(PyExc_ValueError, "negative len for read"); return NULL; } if (recvlen == 0) { recvlen = buflen; } if (buflen < recvlen) { PyBuffer_Release(&view); PyErr_SetString(PyExc_ValueError, "Buffer too small for requested bytes"); return NULL; } if (buflen < 0 || buflen == 0) { errno = EINVAL; goto bail; } // Requires Python 2.7 //if (!_PyVerify_fd(fd)) goto bail; Py_BEGIN_ALLOW_THREADS n = read(fd, buf, recvlen); Py_END_ALLOW_THREADS if (n < 0) goto bail; PyBuffer_Release(&view); return PyInt_FromSsize_t(n); bail: PyBuffer_Release(&view); return PyErr_SetFromErrno(PyExc_OSError); } # endif /* !MS_WINDOWS */ /* * Function table */ static PyMethodDef Billiard_module_methods[] = { {"address_of_buffer", Billiard_multiprocessing_address_of_buffer, METH_O, "address_of_buffer(obj) -> int\n\n" "Return address of obj assuming obj supports buffer inteface"}, #if HAVE_FD_TRANSFER {"sendfd", Billiard_multiprocessing_sendfd, METH_VARARGS, "sendfd(sockfd, fd) -> None\n\n" "Send file descriptor given by fd over the unix domain socket\n" "whose file decriptor is sockfd"}, {"recvfd", Billiard_multiprocessing_recvfd, METH_VARARGS, "recvfd(sockfd) -> fd\n\n" "Receive a file descriptor over a unix domain socket\n" "whose file decriptor is sockfd"}, #endif #if !defined(MS_WINDOWS) {"read", Billiard_read, METH_VARARGS, "read(fd, buffer) -> bytes\n\n" "Read from file descriptor into buffer."}, #endif {NULL} }; /* * Initialize */ PyMODINIT_FUNC init_billiard(void) { PyObject *module, *temp, *value; /* Initialize module */ module = Py_InitModule("_billiard", Billiard_module_methods); if (!module) return; /* Get copy of objects from pickle */ temp = PyImport_ImportModule(PICKLE_MODULE); if (!temp) return; Billiard_pickle_dumps = PyObject_GetAttrString(temp, "dumps"); Billiard_pickle_loads = PyObject_GetAttrString(temp, "loads"); Billiard_pickle_protocol = PyObject_GetAttrString(temp, "HIGHEST_PROTOCOL"); Py_XDECREF(temp); /* Get copy of BufferTooShort */ temp = PyImport_ImportModule("billiard"); if (!temp) return; Billiard_BufferTooShort = PyObject_GetAttrString(temp, "BufferTooShort"); Py_XDECREF(temp); /* Add connection type to module */ if (PyType_Ready(&BilliardConnectionType) < 0) return; Py_INCREF(&BilliardConnectionType); PyModule_AddObject(module, "Connection", (PyObject*)&BilliardConnectionType); #if defined(MS_WINDOWS) || \ (defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED)) /* Add SemLock type to module */ if (PyType_Ready(&BilliardSemLockType) < 0) return; Py_INCREF(&BilliardSemLockType); PyDict_SetItemString(BilliardSemLockType.tp_dict, "SEM_VALUE_MAX", Py_BuildValue("i", SEM_VALUE_MAX)); PyModule_AddObject(module, "SemLock", (PyObject*)&BilliardSemLockType); #endif #ifdef MS_WINDOWS /* Add PipeConnection to module */ if (PyType_Ready(&BilliardPipeConnectionType) < 0) return; Py_INCREF(&BilliardPipeConnectionType); PyModule_AddObject(module, "PipeConnection", (PyObject*)&BilliardPipeConnectionType); /* Initialize win32 class and add to multiprocessing */ temp = create_win32_namespace(); if (!temp) return; PyModule_AddObject(module, "win32", temp); /* Initialize the event handle used to signal Ctrl-C */ sigint_event = CreateEvent(NULL, TRUE, FALSE, NULL); if (!sigint_event) { PyErr_SetFromWindowsErr(0); return; } if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) { PyErr_SetFromWindowsErr(0); return; } #endif /* Add configuration macros */ temp = PyDict_New(); if (!temp) return; #define ADD_FLAG(name) \ value = Py_BuildValue("i", name); \ if (value == NULL) { Py_DECREF(temp); return; } \ if (PyDict_SetItemString(temp, #name, value) < 0) { \ Py_DECREF(temp); Py_DECREF(value); return; } \ Py_DECREF(value) #if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) ADD_FLAG(HAVE_SEM_OPEN); #endif #ifdef HAVE_SEM_TIMEDWAIT ADD_FLAG(HAVE_SEM_TIMEDWAIT); #endif #ifdef HAVE_FD_TRANSFER ADD_FLAG(HAVE_FD_TRANSFER); #endif #ifdef HAVE_BROKEN_SEM_GETVALUE ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE); #endif #ifdef HAVE_BROKEN_SEM_UNLINK ADD_FLAG(HAVE_BROKEN_SEM_UNLINK); #endif if (PyModule_AddObject(module, "flags", temp) < 0) return; } billiard-3.3.0.15/Modules/_billiard/multiprocessing.h0000644000076500000000000001040612270022117023061 0ustar asksolwheel00000000000000#ifndef MULTIPROCESSING_H #define MULTIPROCESSING_H #define PY_SSIZE_T_CLEAN #ifdef __sun /* The control message API is only available on Solaris if XPG 4.2 or later is requested. */ #define _XOPEN_SOURCE 500 #endif #include "Python.h" #include "structmember.h" #include "pythread.h" /* * Platform includes and definitions */ #ifdef MS_WINDOWS # define WIN32_LEAN_AND_MEAN # include # include # include /* getpid() */ # ifdef Py_DEBUG # include # endif # define SEM_HANDLE HANDLE # define SEM_VALUE_MAX LONG_MAX #else # include /* O_CREAT and O_EXCL */ # include # include # include # include /* htonl() and ntohl() */ # if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) # include typedef sem_t *SEM_HANDLE; # endif # define HANDLE int # define SOCKET int # define BOOL int # define UINT32 uint32_t # define INT32 int32_t # define TRUE 1 # define FALSE 0 # define INVALID_HANDLE_VALUE (-1) #endif /* * Issue 3110 - Solaris does not define SEM_VALUE_MAX */ #ifndef SEM_VALUE_MAX #if defined(HAVE_SYSCONF) && defined(_SC_SEM_VALUE_MAX) # define SEM_VALUE_MAX sysconf(_SC_SEM_VALUE_MAX) #elif defined(_SEM_VALUE_MAX) # define SEM_VALUE_MAX _SEM_VALUE_MAX #elif defined(_POSIX_SEM_VALUE_MAX) # define SEM_VALUE_MAX _POSIX_SEM_VALUE_MAX #else # define SEM_VALUE_MAX INT_MAX #endif #endif /* * Make sure Py_ssize_t available */ #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) typedef int Py_ssize_t; # define PY_SSIZE_T_MAX INT_MAX # define PY_SSIZE_T_MIN INT_MIN # define F_PY_SSIZE_T "i" # define PyInt_FromSsize_t(n) PyInt_FromLong((long)n) #else # define F_PY_SSIZE_T "n" #endif /* * Format codes */ #if SIZEOF_VOID_P == SIZEOF_LONG # define F_POINTER "k" # define T_POINTER T_ULONG #elif defined(HAVE_LONG_LONG) && (SIZEOF_VOID_P == SIZEOF_LONG_LONG) # define F_POINTER "K" # define T_POINTER T_ULONGLONG #else # error "can't find format code for unsigned integer of same size as void*" #endif #ifdef MS_WINDOWS # define F_HANDLE F_POINTER # define T_HANDLE T_POINTER # define F_SEM_HANDLE F_HANDLE # define T_SEM_HANDLE T_HANDLE # define F_DWORD "k" # define T_DWORD T_ULONG #else # define F_HANDLE "i" # define T_HANDLE T_INT # define F_SEM_HANDLE F_POINTER # define T_SEM_HANDLE T_POINTER #endif #if PY_VERSION_HEX >= 0x03000000 # define F_RBUFFER "y" #else # define F_RBUFFER "s" #endif /* * Error codes which can be returned by functions called without GIL */ #define MP_SUCCESS (0) #define MP_STANDARD_ERROR (-1) #define MP_MEMORY_ERROR (-1001) #define MP_END_OF_FILE (-1002) #define MP_EARLY_END_OF_FILE (-1003) #define MP_BAD_MESSAGE_LENGTH (-1004) #define MP_SOCKET_ERROR (-1005) #define MP_EXCEPTION_HAS_BEEN_SET (-1006) PyObject *Billiard_SetError(PyObject *Type, int num); /* * Externs - not all will really exist on all platforms */ extern PyObject *Billiard_pickle_dumps; extern PyObject *Billiard_pickle_loads; extern PyObject *Billiard_pickle_protocol; extern PyObject *Billiard_BufferTooShort; extern PyTypeObject BilliardSemLockType; extern PyTypeObject BilliardConnectionType; extern PyTypeObject BilliardPipeConnectionType; extern HANDLE sigint_event; /* * Py3k compatibility */ #if PY_VERSION_HEX >= 0x03000000 # define PICKLE_MODULE "pickle" # define FROM_FORMAT PyUnicode_FromFormat # define PyInt_FromLong PyLong_FromLong # define PyInt_FromSsize_t PyLong_FromSsize_t #else # define PICKLE_MODULE "cPickle" # define FROM_FORMAT PyString_FromFormat #endif #ifndef PyVarObject_HEAD_INIT # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, #endif #ifndef Py_TPFLAGS_HAVE_WEAKREFS # define Py_TPFLAGS_HAVE_WEAKREFS 0 #endif /* * Connection definition */ #define CONNECTION_BUFFER_SIZE 131072 typedef struct { PyObject_HEAD HANDLE handle; int flags; PyObject *weakreflist; char buffer[CONNECTION_BUFFER_SIZE]; } BilliardConnectionObject; /* * Miscellaneous */ #define MAX_MESSAGE_LENGTH 0x7fffffff #ifndef MIN # define MIN(x, y) ((x) < (y) ? x : y) # define MAX(x, y) ((x) > (y) ? x : y) #endif #endif /* MULTIPROCESSING_H */ billiard-3.3.0.15/Modules/_billiard/pipe_connection.c0000644000076500000000000000717012270022117023005 0ustar asksolwheel00000000000000/* * A type which wraps a pipe handle in message oriented mode * * pipe_connection.c * * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt */ #include "multiprocessing.h" #define CLOSE(h) CloseHandle(h) /* * Send string to the pipe; assumes in message oriented mode */ static Py_ssize_t Billiard_conn_send_string(BilliardConnectionObject *conn, char *string, size_t length) { DWORD amount_written; BOOL ret; Py_BEGIN_ALLOW_THREADS ret = WriteFile(conn->handle, string, length, &amount_written, NULL); Py_END_ALLOW_THREADS if (ret == 0 && GetLastError() == ERROR_NO_SYSTEM_RESOURCES) { PyErr_Format(PyExc_ValueError, "Cannnot send %" PY_FORMAT_SIZE_T "d bytes over connection", length); return MP_STANDARD_ERROR; } return ret ? MP_SUCCESS : MP_STANDARD_ERROR; } /* * Attempts to read into buffer, or if buffer too small into *newbuffer. * * Returns number of bytes read. Assumes in message oriented mode. */ static Py_ssize_t Billiard_conn_recv_string(BilliardConnectionObject *conn, char *buffer, size_t buflength, char **newbuffer, size_t maxlength) { DWORD left, length, full_length, err; BOOL ret; *newbuffer = NULL; Py_BEGIN_ALLOW_THREADS ret = ReadFile(conn->handle, buffer, MIN(buflength, maxlength), &length, NULL); Py_END_ALLOW_THREADS if (ret) return length; err = GetLastError(); if (err != ERROR_MORE_DATA) { if (err == ERROR_BROKEN_PIPE) return MP_END_OF_FILE; return MP_STANDARD_ERROR; } if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, NULL, &left)) return MP_STANDARD_ERROR; full_length = length + left; if (full_length > maxlength) return MP_BAD_MESSAGE_LENGTH; *newbuffer = PyMem_Malloc(full_length); if (*newbuffer == NULL) return MP_MEMORY_ERROR; memcpy(*newbuffer, buffer, length); Py_BEGIN_ALLOW_THREADS ret = ReadFile(conn->handle, *newbuffer+length, left, &length, NULL); Py_END_ALLOW_THREADS if (ret) { assert(length == left); return full_length; } else { PyMem_Free(*newbuffer); return MP_STANDARD_ERROR; } } /* * Check whether any data is available for reading */ static int Billiard_conn_poll(BilliardConnectionObject *conn, double timeout, PyThreadState *_save) { DWORD bytes, deadline, delay; int difference, res; BOOL block = FALSE; if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL)) return MP_STANDARD_ERROR; if (timeout == 0.0) return bytes > 0; if (timeout < 0.0) block = TRUE; else /* XXX does not check for overflow */ deadline = GetTickCount() + (DWORD)(1000 * timeout + 0.5); Sleep(0); for (delay = 1 ; ; delay += 1) { if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL)) return MP_STANDARD_ERROR; else if (bytes > 0) return TRUE; if (!block) { difference = deadline - GetTickCount(); if (difference < 0) return FALSE; if ((int)delay > difference) delay = difference; } if (delay > 20) delay = 20; Sleep(delay); /* check for signals */ Py_BLOCK_THREADS res = PyErr_CheckSignals(); Py_UNBLOCK_THREADS if (res) return MP_EXCEPTION_HAS_BEEN_SET; } } /* * "connection.h" defines the PipeConnection type using the definitions above */ #define CONNECTION_NAME "PipeConnection" #define CONNECTION_TYPE BilliardPipeConnectionType #include "connection.h" billiard-3.3.0.15/Modules/_billiard/semaphore.c0000644000076500000000000004444512270022117021622 0ustar asksolwheel00000000000000/* * A type which wraps a semaphore * * semaphore.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocessing.h" enum { RECURSIVE_MUTEX, SEMAPHORE }; typedef struct { PyObject_HEAD SEM_HANDLE handle; long last_tid; int count; int maxvalue; int kind; char *name; } BilliardSemLockObject; #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) #ifdef MS_WINDOWS /* * Windows definitions */ #define SEM_FAILED NULL #define SEM_CLEAR_ERROR() SetLastError(0) #define SEM_GET_LAST_ERROR() GetLastError() #define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL) #define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1) #define SEM_GETVALUE(sem, pval) _Billiard_GetSemaphoreValue(sem, pval) #define SEM_UNLINK(name) 0 static int _Billiard_GetSemaphoreValue(HANDLE handle, long *value) { long previous; switch (WaitForSingleObject(handle, 0)) { case WAIT_OBJECT_0: if (!ReleaseSemaphore(handle, 1, &previous)) return MP_STANDARD_ERROR; *value = previous + 1; return 0; case WAIT_TIMEOUT: *value = 0; return 0; default: return MP_STANDARD_ERROR; } } static PyObject * Billiard_semlock_acquire(BilliardSemLockObject *self, PyObject *args, PyObject *kwds) { int blocking = 1; double timeout; PyObject *timeout_obj = Py_None; DWORD res, full_msecs, msecs, start, ticks; static char *kwlist[] = {"block", "timeout", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist, &blocking, &timeout_obj)) return NULL; /* calculate timeout */ if (!blocking) { full_msecs = 0; } else if (timeout_obj == Py_None) { full_msecs = INFINITE; } else { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; timeout *= 1000.0; /* convert to millisecs */ if (timeout < 0.0) { timeout = 0.0; } else if (timeout >= 0.5 * INFINITE) { /* 25 days */ PyErr_SetString(PyExc_OverflowError, "timeout is too large"); return NULL; } full_msecs = (DWORD)(timeout + 0.5); } /* check whether we already own the lock */ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } /* check whether we can acquire without blocking */ if (WaitForSingleObject(self->handle, 0) == WAIT_OBJECT_0) { self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; } msecs = full_msecs; start = GetTickCount(); for ( ; ; ) { HANDLE handles[2] = {self->handle, sigint_event}; /* do the wait */ Py_BEGIN_ALLOW_THREADS ResetEvent(sigint_event); res = WaitForMultipleObjects(2, handles, FALSE, msecs); Py_END_ALLOW_THREADS /* handle result */ if (res != WAIT_OBJECT_0 + 1) break; /* got SIGINT so give signal handler a chance to run */ Sleep(1); /* if this is main thread let KeyboardInterrupt be raised */ if (PyErr_CheckSignals()) return NULL; /* recalculate timeout */ if (msecs != INFINITE) { ticks = GetTickCount(); if ((DWORD)(ticks - start) >= full_msecs) Py_RETURN_FALSE; msecs = full_msecs - (ticks - start); } } /* handle result */ switch (res) { case WAIT_TIMEOUT: Py_RETURN_FALSE; case WAIT_OBJECT_0: self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; case WAIT_FAILED: return PyErr_SetFromWindowsErr(0); default: PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or " "WaitForMultipleObjects() gave unrecognized " "value %d", res); return NULL; } } static PyObject * Billiard_semlock_release(BilliardSemLockObject *self, PyObject *args) { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } if (!ReleaseSemaphore(self->handle, 1, NULL)) { if (GetLastError() == ERROR_TOO_MANY_POSTS) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } else { return PyErr_SetFromWindowsErr(0); } } --self->count; Py_RETURN_NONE; } #else /* !MS_WINDOWS */ /* * Unix definitions */ #define SEM_CLEAR_ERROR() #define SEM_GET_LAST_ERROR() 0 #define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val) #define SEM_CLOSE(sem) sem_close(sem) #define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval) #define SEM_UNLINK(name) sem_unlink(name) #ifndef HAVE_SEM_UNLINK # define sem_unlink(name) 0 #endif //#ifndef HAVE_SEM_TIMEDWAIT # define sem_timedwait(sem,deadline) Billiard_sem_timedwait_save(sem,deadline,_save) int Billiard_sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) { int res; unsigned long delay, difference; struct timeval now, tvdeadline, tvdelay; errno = 0; tvdeadline.tv_sec = deadline->tv_sec; tvdeadline.tv_usec = deadline->tv_nsec / 1000; for (delay = 0 ; ; delay += 1000) { /* poll */ if (sem_trywait(sem) == 0) return 0; else if (errno != EAGAIN) return MP_STANDARD_ERROR; /* get current time */ if (gettimeofday(&now, NULL) < 0) return MP_STANDARD_ERROR; /* check for timeout */ if (tvdeadline.tv_sec < now.tv_sec || (tvdeadline.tv_sec == now.tv_sec && tvdeadline.tv_usec <= now.tv_usec)) { errno = ETIMEDOUT; return MP_STANDARD_ERROR; } /* calculate how much time is left */ difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 + (tvdeadline.tv_usec - now.tv_usec); /* check delay not too long -- maximum is 20 msecs */ if (delay > 20000) delay = 20000; if (delay > difference) delay = difference; /* sleep */ tvdelay.tv_sec = delay / 1000000; tvdelay.tv_usec = delay % 1000000; if (select(0, NULL, NULL, NULL, &tvdelay) < 0) return MP_STANDARD_ERROR; /* check for signals */ Py_BLOCK_THREADS res = PyErr_CheckSignals(); Py_UNBLOCK_THREADS if (res) { errno = EINTR; return MP_EXCEPTION_HAS_BEEN_SET; } } } //#endif /* !HAVE_SEM_TIMEDWAIT */ static PyObject * Billiard_semlock_acquire(BilliardSemLockObject *self, PyObject *args, PyObject *kwds) { int blocking = 1, res; double timeout; PyObject *timeout_obj = Py_None; struct timespec deadline = {0}; struct timeval now; long sec, nsec; static char *kwlist[] = {"block", "timeout", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist, &blocking, &timeout_obj)) return NULL; if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } if (timeout_obj != Py_None) { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; if (timeout < 0.0) timeout = 0.0; if (gettimeofday(&now, NULL) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } sec = (long) timeout; nsec = (long) (1e9 * (timeout - sec) + 0.5); deadline.tv_sec = now.tv_sec + sec; deadline.tv_nsec = now.tv_usec * 1000 + nsec; deadline.tv_sec += (deadline.tv_nsec / 1000000000); deadline.tv_nsec %= 1000000000; } do { Py_BEGIN_ALLOW_THREADS if (blocking && timeout_obj == Py_None) res = sem_wait(self->handle); else if (!blocking) res = sem_trywait(self->handle); else res = sem_timedwait(self->handle, &deadline); Py_END_ALLOW_THREADS if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); if (res < 0) { if (errno == EAGAIN || errno == ETIMEDOUT) Py_RETURN_FALSE; else if (errno == EINTR) return NULL; else return PyErr_SetFromErrno(PyExc_OSError); } ++self->count; self->last_tid = PyThread_get_thread_ident(); Py_RETURN_TRUE; } static PyObject * Billiard_semlock_release(BilliardSemLockObject *self, PyObject *args) { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } else { #ifdef HAVE_BROKEN_SEM_GETVALUE /* We will only check properly the maxvalue == 1 case */ if (self->maxvalue == 1) { /* make sure that already locked */ if (sem_trywait(self->handle) < 0) { if (errno != EAGAIN) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } /* it is already locked as expected */ } else { /* it was not locked so undo wait and raise */ if (sem_post(self->handle) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } PyErr_SetString(PyExc_ValueError, "semaphore " "or lock released too many " "times"); return NULL; } } #else int sval; /* This check is not an absolute guarantee that the semaphore does not rise above maxvalue. */ if (sem_getvalue(self->handle, &sval) < 0) { return PyErr_SetFromErrno(PyExc_OSError); } else if (sval >= self->maxvalue) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } #endif } if (sem_post(self->handle) < 0) return PyErr_SetFromErrno(PyExc_OSError); --self->count; Py_RETURN_NONE; } #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject * Billiard_newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, char *name) { BilliardSemLockObject *self; self = PyObject_New(BilliardSemLockObject, type); if (!self) return NULL; self->handle = handle; self->kind = kind; self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; return (PyObject*)self; } static PyObject * Billiard_semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { SEM_HANDLE handle = SEM_FAILED; int kind, maxvalue, value, unlink; PyObject *result; char *name, *name_copy = NULL; static char *kwlist[] = {"kind", "value", "maxvalue", "name", "unlink", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiisi", kwlist, &kind, &value, &maxvalue, &name, &unlink)) return NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { PyErr_SetString(PyExc_ValueError, "unrecognized kind"); return NULL; } if (!unlink) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) goto failure; strcpy(name_copy, name); } SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; if (unlink && SEM_UNLINK(name) < 0) goto failure; result = Billiard_newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; return result; failure: if (handle != SEM_FAILED) SEM_CLOSE(handle); PyMem_Free(name_copy); Billiard_SetError(NULL, MP_STANDARD_ERROR); return NULL; } static PyObject * Billiard_semlock_rebuild(PyTypeObject *type, PyObject *args) { SEM_HANDLE handle; int kind, maxvalue; char *name; if (!PyArg_ParseTuple(args, F_SEM_HANDLE "iiz", &handle, &kind, &maxvalue, &name)) return NULL; #ifndef MS_WINDOWS if (name != NULL) { handle = sem_open(name, 0); if (handle == SEM_FAILED) return NULL; } #endif return Billiard_newsemlockobject(type, handle, kind, maxvalue, name); } static void Billiard_semlock_dealloc(BilliardSemLockObject* self) { if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); PyMem_Free(self->name); PyObject_Del(self); } static PyObject * Billiard_semlock_count(BilliardSemLockObject *self) { return PyInt_FromLong((long)self->count); } static PyObject * Billiard_semlock_ismine(BilliardSemLockObject *self) { /* only makes sense for a lock */ return PyBool_FromLong(ISMINE(self)); } static PyObject * Billiard_semlock_getvalue(BilliardSemLockObject *self) { #ifdef HAVE_BROKEN_SEM_GETVALUE PyErr_SetNone(PyExc_NotImplementedError); return NULL; #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return Billiard_SetError(NULL, MP_STANDARD_ERROR); /* some posix implementations use negative numbers to indicate the number of waiting threads */ if (sval < 0) sval = 0; return PyInt_FromLong((long)sval); #endif } static PyObject * Billiard_semlock_iszero(BilliardSemLockObject *self) { #ifdef HAVE_BROKEN_SEM_GETVALUE if (sem_trywait(self->handle) < 0) { if (errno == EAGAIN) Py_RETURN_TRUE; return Billiard_SetError(NULL, MP_STANDARD_ERROR); } else { if (sem_post(self->handle) < 0) return Billiard_SetError(NULL, MP_STANDARD_ERROR); Py_RETURN_FALSE; } #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return Billiard_SetError(NULL, MP_STANDARD_ERROR); return PyBool_FromLong((long)sval == 0); #endif } static PyObject * Billiard_semlock_afterfork(BilliardSemLockObject *self) { self->count = 0; Py_RETURN_NONE; } static PyObject * Billiard_semlock_unlink(PyObject *ignore, PyObject *args) { char *name; if (!PyArg_ParseTuple(args, "s", &name)) return NULL; if (SEM_UNLINK(name) < 0) { Billiard_SetError(NULL, MP_STANDARD_ERROR); return NULL; } Py_RETURN_NONE; } /* * Semaphore methods */ static PyMethodDef Billiard_semlock_methods[] = { {"acquire", (PyCFunction)Billiard_semlock_acquire, METH_VARARGS | METH_KEYWORDS, "acquire the semaphore/lock"}, {"release", (PyCFunction)Billiard_semlock_release, METH_NOARGS, "release the semaphore/lock"}, {"__enter__", (PyCFunction)Billiard_semlock_acquire, METH_VARARGS | METH_KEYWORDS, "enter the semaphore/lock"}, {"__exit__", (PyCFunction)Billiard_semlock_release, METH_VARARGS, "exit the semaphore/lock"}, {"_count", (PyCFunction)Billiard_semlock_count, METH_NOARGS, "num of `acquire()`s minus num of `release()`s for this process"}, {"_is_mine", (PyCFunction)Billiard_semlock_ismine, METH_NOARGS, "whether the lock is owned by this thread"}, {"_get_value", (PyCFunction)Billiard_semlock_getvalue, METH_NOARGS, "get the value of the semaphore"}, {"_is_zero", (PyCFunction)Billiard_semlock_iszero, METH_NOARGS, "returns whether semaphore has value zero"}, {"_rebuild", (PyCFunction)Billiard_semlock_rebuild, METH_VARARGS | METH_CLASS, ""}, {"_after_fork", (PyCFunction)Billiard_semlock_afterfork, METH_NOARGS, "rezero the net acquisition count after fork()"}, {"sem_unlink", (PyCFunction)Billiard_semlock_unlink, METH_VARARGS | METH_STATIC, "unlink the named semaphore using sem_unlink()"}, {NULL} }; /* * Member table */ static PyMemberDef Billiard_semlock_members[] = { {"handle", T_SEM_HANDLE, offsetof(BilliardSemLockObject, handle), READONLY, ""}, {"kind", T_INT, offsetof(BilliardSemLockObject, kind), READONLY, ""}, {"maxvalue", T_INT, offsetof(BilliardSemLockObject, maxvalue), READONLY, ""}, {"name", T_STRING, offsetof(BilliardSemLockObject, name), READONLY, ""}, {NULL} }; /* * Semaphore type */ PyTypeObject BilliardSemLockType = { PyVarObject_HEAD_INIT(NULL, 0) /* tp_name */ "_billiard.SemLock", /* tp_basicsize */ sizeof(BilliardSemLockObject), /* tp_itemsize */ 0, /* tp_dealloc */ (destructor)Billiard_semlock_dealloc, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_doc */ "Semaphore/Mutex type", /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ Billiard_semlock_methods, /* tp_members */ Billiard_semlock_members, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ Billiard_semlock_new, }; billiard-3.3.0.15/Modules/_billiard/socket_connection.c0000644000076500000000000001324112270022117023334 0ustar asksolwheel00000000000000/* * A type which wraps a socket * * socket_connection.c * * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt */ #include "multiprocessing.h" #ifdef MS_WINDOWS # define WRITE(h, buffer, length) send((SOCKET)h, buffer, length, 0) # define READ(h, buffer, length) recv((SOCKET)h, buffer, length, 0) # define CLOSE(h) closesocket((SOCKET)h) #else # define WRITE(h, buffer, length) write(h, buffer, length) # define READ(h, buffer, length) read(h, buffer, length) # define CLOSE(h) close(h) #endif /* * Send string to file descriptor */ void _Billiard_setblocking(int fd, int blocking) { #ifdef MS_WINDOWS unsigned long mode = blocking ? 0 : 1; ioctlsocket(fd, FIONBIO, &mode); #else int flags = fcntl(fd, F_GETFL, 0); if (flags > 0) { flags = blocking ? (flags &~ O_NONBLOCK) : (flags | O_NONBLOCK); fcntl(fd, F_SETFL, flags); } #endif } ssize_t _Billiard_conn_send_offset(HANDLE fd, char *string, Py_ssize_t len, Py_ssize_t offset) { char *p = string; p += offset; return WRITE(fd, p, (size_t)len - offset); } static Py_ssize_t _Billiard_conn_sendall(HANDLE h, char *string, size_t length) { char *p = string; Py_ssize_t res; while (length > 0) { res = WRITE(h, p, length); if (res < 0) return MP_SOCKET_ERROR; length -= res; p += res; } return MP_SUCCESS; } /* * Receive string of exact length from file descriptor */ static Py_ssize_t _Billiard_conn_recvall(HANDLE h, char *buffer, size_t length) { size_t remaining = length; Py_ssize_t temp; char *p = buffer; while (remaining > 0) { temp = READ(h, p, remaining); if (temp <= 0) { if (temp == 0) return remaining == length ? MP_END_OF_FILE : MP_EARLY_END_OF_FILE; else return temp; } remaining -= temp; p += temp; } return MP_SUCCESS; } /* * Send a string prepended by the string length in network byte order */ static Py_ssize_t Billiard_conn_send_string(BilliardConnectionObject *conn, char *string, size_t length) { Py_ssize_t res; /* The "header" of the message is a 32 bit unsigned number (in network order) which specifies the length of the "body". If the message is shorter than about 16kb then it is quicker to combine the "header" and the "body" of the message and send them at once. */ if (length < (16*1024)) { char *message; message = PyMem_Malloc(length+4); if (message == NULL) return MP_MEMORY_ERROR; *(UINT32*)message = htonl((UINT32)length); memcpy(message+4, string, length); Py_BEGIN_ALLOW_THREADS res = _Billiard_conn_sendall(conn->handle, message, length+4); Py_END_ALLOW_THREADS PyMem_Free(message); } else { UINT32 lenbuff; if (length > MAX_MESSAGE_LENGTH) return MP_BAD_MESSAGE_LENGTH; lenbuff = htonl((UINT32)length); Py_BEGIN_ALLOW_THREADS res = _Billiard_conn_sendall(conn->handle, (char*)&lenbuff, 4); if (res == MP_SUCCESS) res = _Billiard_conn_sendall(conn->handle, string, length); Py_END_ALLOW_THREADS } return res; } /* * Attempts to read into buffer, or failing that into *newbuffer * * Returns number of bytes read. */ static Py_ssize_t Billiard_conn_recv_string(BilliardConnectionObject *conn, char *buffer, size_t buflength, char **newbuffer, size_t maxlength) { int res; UINT32 ulength; *newbuffer = NULL; Py_BEGIN_ALLOW_THREADS res = _Billiard_conn_recvall(conn->handle, (char*)&ulength, 4); Py_END_ALLOW_THREADS if (res < 0) return res; ulength = ntohl(ulength); if (ulength > maxlength) return MP_BAD_MESSAGE_LENGTH; if (ulength <= buflength) { Py_BEGIN_ALLOW_THREADS res = _Billiard_conn_recvall(conn->handle, buffer, (size_t)ulength); Py_END_ALLOW_THREADS return res < 0 ? res : ulength; } else { *newbuffer = PyMem_Malloc((size_t)ulength); if (*newbuffer == NULL) return MP_MEMORY_ERROR; Py_BEGIN_ALLOW_THREADS res = _Billiard_conn_recvall(conn->handle, *newbuffer, (size_t)ulength); Py_END_ALLOW_THREADS return res < 0 ? (Py_ssize_t)res : (Py_ssize_t)ulength; } } /* * Check whether any data is available for reading -- neg timeout blocks */ static int Billiard_conn_poll(BilliardConnectionObject *conn, double timeout, PyThreadState *_save) { int res; fd_set rfds; /* * Verify the handle, issue 3321. Not required for windows. */ #ifndef MS_WINDOWS if (((int)conn->handle) < 0 || ((int)conn->handle) >= FD_SETSIZE) { Py_BLOCK_THREADS PyErr_SetString(PyExc_IOError, "handle out of range in select()"); Py_UNBLOCK_THREADS return MP_EXCEPTION_HAS_BEEN_SET; } #endif FD_ZERO(&rfds); FD_SET((SOCKET)conn->handle, &rfds); if (timeout < 0.0) { res = select((int)conn->handle+1, &rfds, NULL, NULL, NULL); } else { struct timeval tv; tv.tv_sec = (long)timeout; tv.tv_usec = (long)((timeout - tv.tv_sec) * 1e6 + 0.5); res = select((int)conn->handle+1, &rfds, NULL, NULL, &tv); } if (res < 0) { return MP_SOCKET_ERROR; } else if (FD_ISSET(conn->handle, &rfds)) { return TRUE; } else { assert(res == 0); return FALSE; } } /* * "connection.h" defines the Connection type using defs above */ #define CONNECTION_NAME "Connection" #define CONNECTION_TYPE BilliardConnectionType #include "connection.h" billiard-3.3.0.15/Modules/_billiard/win32_functions.c0000644000076500000000000001566312270022117022671 0ustar asksolwheel00000000000000/* * Win32 functions used by multiprocessing package * * win32_functions.c * * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt */ #include "multiprocessing.h" #define WIN32_FUNCTION(func) \ {#func, (PyCFunction)win32_ ## func, METH_VARARGS | METH_STATIC, ""} #define WIN32_CONSTANT(fmt, con) \ PyDict_SetItemString(Win32Type.tp_dict, #con, Py_BuildValue(fmt, con)) static PyObject * win32_CloseHandle(PyObject *self, PyObject *args) { HANDLE hObject; BOOL success; if (!PyArg_ParseTuple(args, F_HANDLE, &hObject)) return NULL; Py_BEGIN_ALLOW_THREADS success = CloseHandle(hObject); Py_END_ALLOW_THREADS if (!success) return PyErr_SetFromWindowsErr(0); Py_RETURN_NONE; } static PyObject * win32_ConnectNamedPipe(PyObject *self, PyObject *args) { HANDLE hNamedPipe; LPOVERLAPPED lpOverlapped; BOOL success; if (!PyArg_ParseTuple(args, F_HANDLE F_POINTER, &hNamedPipe, &lpOverlapped)) return NULL; Py_BEGIN_ALLOW_THREADS success = ConnectNamedPipe(hNamedPipe, lpOverlapped); Py_END_ALLOW_THREADS if (!success) return PyErr_SetFromWindowsErr(0); Py_RETURN_NONE; } static PyObject * win32_CreateFile(PyObject *self, PyObject *args) { LPCTSTR lpFileName; DWORD dwDesiredAccess; DWORD dwShareMode; LPSECURITY_ATTRIBUTES lpSecurityAttributes; DWORD dwCreationDisposition; DWORD dwFlagsAndAttributes; HANDLE hTemplateFile; HANDLE handle; if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_POINTER F_DWORD F_DWORD F_HANDLE, &lpFileName, &dwDesiredAccess, &dwShareMode, &lpSecurityAttributes, &dwCreationDisposition, &dwFlagsAndAttributes, &hTemplateFile)) return NULL; Py_BEGIN_ALLOW_THREADS handle = CreateFile(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile); Py_END_ALLOW_THREADS if (handle == INVALID_HANDLE_VALUE) return PyErr_SetFromWindowsErr(0); return Py_BuildValue(F_HANDLE, handle); } static PyObject * win32_CreateNamedPipe(PyObject *self, PyObject *args) { LPCTSTR lpName; DWORD dwOpenMode; DWORD dwPipeMode; DWORD nMaxInstances; DWORD nOutBufferSize; DWORD nInBufferSize; DWORD nDefaultTimeOut; LPSECURITY_ATTRIBUTES lpSecurityAttributes; HANDLE handle; if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_DWORD F_DWORD F_DWORD F_DWORD F_POINTER, &lpName, &dwOpenMode, &dwPipeMode, &nMaxInstances, &nOutBufferSize, &nInBufferSize, &nDefaultTimeOut, &lpSecurityAttributes)) return NULL; Py_BEGIN_ALLOW_THREADS handle = CreateNamedPipe(lpName, dwOpenMode, dwPipeMode, nMaxInstances, nOutBufferSize, nInBufferSize, nDefaultTimeOut, lpSecurityAttributes); Py_END_ALLOW_THREADS if (handle == INVALID_HANDLE_VALUE) return PyErr_SetFromWindowsErr(0); return Py_BuildValue(F_HANDLE, handle); } static PyObject * win32_ExitProcess(PyObject *self, PyObject *args) { UINT uExitCode; if (!PyArg_ParseTuple(args, "I", &uExitCode)) return NULL; #if defined(Py_DEBUG) SetErrorMode(SEM_FAILCRITICALERRORS|SEM_NOALIGNMENTFAULTEXCEPT|SEM_NOGPFAULTERRORBOX|SEM_NOOPENFILEERRORBOX); _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_DEBUG); #endif ExitProcess(uExitCode); return NULL; } static PyObject * win32_GetLastError(PyObject *self, PyObject *args) { return Py_BuildValue(F_DWORD, GetLastError()); } static PyObject * win32_OpenProcess(PyObject *self, PyObject *args) { DWORD dwDesiredAccess; BOOL bInheritHandle; DWORD dwProcessId; HANDLE handle; if (!PyArg_ParseTuple(args, F_DWORD "i" F_DWORD, &dwDesiredAccess, &bInheritHandle, &dwProcessId)) return NULL; handle = OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId); if (handle == NULL) return PyErr_SetFromWindowsErr(0); return Py_BuildValue(F_HANDLE, handle); } static PyObject * win32_SetNamedPipeHandleState(PyObject *self, PyObject *args) { HANDLE hNamedPipe; PyObject *oArgs[3]; DWORD dwArgs[3], *pArgs[3] = {NULL, NULL, NULL}; int i; if (!PyArg_ParseTuple(args, F_HANDLE "OOO", &hNamedPipe, &oArgs[0], &oArgs[1], &oArgs[2])) return NULL; PyErr_Clear(); for (i = 0 ; i < 3 ; i++) { if (oArgs[i] != Py_None) { dwArgs[i] = PyInt_AsUnsignedLongMask(oArgs[i]); if (PyErr_Occurred()) return NULL; pArgs[i] = &dwArgs[i]; } } if (!SetNamedPipeHandleState(hNamedPipe, pArgs[0], pArgs[1], pArgs[2])) return PyErr_SetFromWindowsErr(0); Py_RETURN_NONE; } static PyObject * win32_WaitNamedPipe(PyObject *self, PyObject *args) { LPCTSTR lpNamedPipeName; DWORD nTimeOut; BOOL success; if (!PyArg_ParseTuple(args, "s" F_DWORD, &lpNamedPipeName, &nTimeOut)) return NULL; Py_BEGIN_ALLOW_THREADS success = WaitNamedPipe(lpNamedPipeName, nTimeOut); Py_END_ALLOW_THREADS if (!success) return PyErr_SetFromWindowsErr(0); Py_RETURN_NONE; } static PyMethodDef win32_methods[] = { WIN32_FUNCTION(CloseHandle), WIN32_FUNCTION(GetLastError), WIN32_FUNCTION(OpenProcess), WIN32_FUNCTION(ExitProcess), WIN32_FUNCTION(ConnectNamedPipe), WIN32_FUNCTION(CreateFile), WIN32_FUNCTION(CreateNamedPipe), WIN32_FUNCTION(SetNamedPipeHandleState), WIN32_FUNCTION(WaitNamedPipe), {NULL} }; PyTypeObject Win32Type = { PyVarObject_HEAD_INIT(NULL, 0) }; PyObject * create_win32_namespace(void) { Win32Type.tp_name = "_billiard.win32"; Win32Type.tp_methods = win32_methods; if (PyType_Ready(&Win32Type) < 0) return NULL; Py_INCREF(&Win32Type); WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS); WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY); WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED); WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT); WIN32_CONSTANT(F_DWORD, GENERIC_READ); WIN32_CONSTANT(F_DWORD, GENERIC_WRITE); WIN32_CONSTANT(F_DWORD, INFINITE); WIN32_CONSTANT(F_DWORD, NMPWAIT_WAIT_FOREVER); WIN32_CONSTANT(F_DWORD, OPEN_EXISTING); WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_DUPLEX); WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_INBOUND); WIN32_CONSTANT(F_DWORD, PIPE_READMODE_MESSAGE); WIN32_CONSTANT(F_DWORD, PIPE_TYPE_MESSAGE); WIN32_CONSTANT(F_DWORD, PIPE_UNLIMITED_INSTANCES); WIN32_CONSTANT(F_DWORD, PIPE_WAIT); WIN32_CONSTANT(F_DWORD, PROCESS_ALL_ACCESS); WIN32_CONSTANT("i", NULL); return (PyObject*)&Win32Type; } billiard-3.3.0.15/pavement.py0000644000076500000000000000634212270022117016325 0ustar asksolwheel00000000000000import sys from paver.easy import * from paver import doctools from paver.setuputils import setup PYCOMPILE_CACHES = ["*.pyc", "*$py.class"] options( sphinx=Bunch(builddir=".build"), ) def sphinx_builddir(options): return path("docs") / options.sphinx.builddir / "html" @task def clean_docs(options): sphinx_builddir(options).rmtree() @task @needs("clean_docs", "paver.doctools.html") def html(options): destdir = path("Documentation") destdir.rmtree() builtdocs = sphinx_builddir(options) builtdocs.move(destdir) @task @needs("paver.doctools.html") def qhtml(options): destdir = path("Documentation") builtdocs = sphinx_builddir(options) sh("rsync -az %s/ %s" % (builtdocs, destdir)) @task @needs("clean_docs", "paver.doctools.html") def ghdocs(options): builtdocs = sphinx_builddir(options) sh("git checkout gh-pages && \ cp -r %s/* . && \ git commit . -m 'Rendered documentation for Github Pages.' && \ git push origin gh-pages && \ git checkout master" % builtdocs) @task @needs("clean_docs", "paver.doctools.html") def upload_pypi_docs(options): builtdocs = path("docs") / options.builddir / "html" sh("%s setup.py upload_sphinx --upload-dir='%s'" % ( sys.executable, builtdocs)) @task @needs("upload_pypi_docs", "ghdocs") def upload_docs(options): pass @task @cmdopts([ ("noerror", "E", "Ignore errors"), ]) def flake8(options): noerror = getattr(options, "noerror", False) complexity = getattr(options, "complexity", 22) sh("""flake8 billiard | perl -mstrict -mwarnings -nle' my $ignore = m/too complex \((\d+)\)/ && $1 le %s; if (! $ignore) { print STDERR; our $FOUND_FLAKE = 1 } }{exit $FOUND_FLAKE; '""" % (complexity, ), ignore_error=noerror) @task @cmdopts([ ("noerror", "E", "Ignore errors"), ]) def flakeplus(options): noerror = getattr(options, "noerror", False) sh("flakeplus billiard --2.6", ignore_error=noerror) @task @cmdopts([ ("noerror", "E", "Ignore errors") ]) def flakes(options): flake8(options) flakeplus(options) @task def bump(options): sh("contrib/release/bump_version.py billiard/__init__.py") @task @cmdopts([ ("coverage", "c", "Enable coverage"), ("verbose", "V", "Make more noise"), ]) def test(options): cmd = "nosetests" if getattr(options, "coverage", False): cmd += " --with-coverage3" if getattr(options, "verbose", False): cmd += " --verbosity=2" sh(cmd) @task @cmdopts([ ("noerror", "E", "Ignore errors"), ]) def pep8(options): noerror = getattr(options, "noerror", False) return sh("""find . -name "*.py" | xargs pep8 | perl -nle'\ print; $a=1 if $_}{exit($a)'""", ignore_error=noerror) @task def removepyc(options): sh("find . -type f -a \\( %s \\) | xargs rm" % ( " -o ".join("-name '%s'" % (pat, ) for pat in PYCOMPILE_CACHES), )) @task @needs("removepyc") def gitclean(options): sh("git clean -xdn") @task @needs("removepyc") def gitcleanforce(options): sh("git clean -xdf") @task @needs("flakes", "test", "gitclean") def releaseok(options): pass @task @needs("releaseok", "removepyc", "upload_docs") def release(options): pass billiard-3.3.0.15/PKG-INFO0000644000076500000000000006107012276217622015245 0ustar asksolwheel00000000000000Metadata-Version: 1.1 Name: billiard Version: 3.3.0.15 Summary: Python multiprocessing fork with improvements and bugfixes Home-page: http://github.com/celery/billiard Author: Ask Solem', Author-email: ask@celeryproject.org License: BSD Description: ======== billiard ======== :version: 3.3.0.15 About ----- `billiard` is a fork of the Python 2.7 `multiprocessing `_ package. The multiprocessing package itself is a renamed and updated version of R Oudkerk's `pyprocessing `_ package. This standalone variant is intended to be compatible with Python 2.4 and 2.5, and will draw it's fixes/improvements from python-trunk. - This package would not be possible if not for the contributions of not only the current maintainers but all of the contributors to the original pyprocessing package listed `here `_ - Also it is a fork of the multiprocessin backport package by Christian Heims. - It includes the no-execv patch contributed by R. Oudkerk. - And the Pool improvements previously located in `Celery`_. .. _`Celery`: http://celeryproject.org Bug reporting ------------- Please report bugs related to multiprocessing at the `Python bug tracker `_. Issues related to billiard should be reported at http://github.com/celery/billiard/issues. .. image:: https://d2weczhvl823v0.cloudfront.net/celery/billiard/trend.png :alt: Bitdeli badge :target: https://bitdeli.com/free =========== Changes =========== 3.3.0.15 - 2014-02-10 --------------------- - Pool: Fixed "cannot join process not started" error. - Now uses billiard.py2 and billiard.py3 specific packages that are installed depending on the python version used. This way the installation will not import version specific modules (and possibly crash). 3.3.0.14 - 2014-01-17 --------------------- - Fixed problem with our backwards compatible ``bytes`` wrapper (Issue #103). - No longer expects frozen applications to have a valid ``__file__`` attribute. Fix contributed by George Sibble. 3.3.0.13 - 2013-12-13 --------------------- - Fixes compatability with Python < 2.7.6 - No longer attempts to handle ``SIGBUS`` Contributed by Vishal Vatsa. - Non-thread based pool now only handles signals: ``SIGHUP``, ``SIGQUIT``, ``SIGTERM``, ``SIGUSR1``, ``SIGUSR2``. - setup.py: Only show compilation warning for build related commands. 3.3.0.12 - 2013-12-09 --------------------- - Fixed installation for Python 3. Contributed by Rickert Mulder. - Pool: Fixed bug with maxtasksperchild. Fix contributed by Ionel Cristian Maries. - Pool: Fixed bug in maintain_pool. 3.3.0.11 - 2013-12-03 --------------------- - Fixed Unicode error when installing the distribution (Issue #89). - Daemonic processes are now allowed to have children. But note that it will not be possible to automatically terminate them when the process exits. See discussion at https://github.com/celery/celery/issues/1709 - Pool: Would not always be able to detect that a process exited. 3.3.0.10 - 2013-12-02 --------------------- - Windows: Fixed problem with missing ``WAITABANDONED_0`` Fix contributed by Matthias Wagner - Windows: PipeConnection can now be inherited. Fix contributed by Matthias Wagner 3.3.0.9 - 2013-12-02 -------------------- - Temporary workaround for Celery maxtasksperchild issue. Fix contributed by Ionel Cristian Maries. 3.3.0.8 - 2013-11-21 -------------------- - Now also sets ``multiprocessing.current_process`` for compatibility with loggings ``processName`` field. 3.3.0.7 - 2013-11-15 -------------------- - Fixed compatibility with PyPy 2.1 + 2.2. - Fixed problem in pypy detection. Fix contributed by Tin Tvrtkovic. - Now uses ``ctypes.find_library`` instead of hardcoded path to find the OS X CoreServices framework. Fix contributed by Moritz Kassner. 3.3.0.6 - 2013-11-12 -------------------- - Now works without C extension again. - New ``_billiard.read(fd, buffer, [len, ])` function implements os.read with buffer support (new buffer API) - New pure-python implementation of ``Connection.send_offset``. 3.3.0.5 - 2013-11-11 -------------------- - All platforms except for Windows/PyPy/Jython now requires the C extension. 3.3.0.4 - 2013-11-11 -------------------- - Fixed problem with Python3 and setblocking. 3.3.0.3 - 2013-11-09 -------------------- - Now works on Windows again. 3.3.0.2 - 2013-11-08 -------------------- - ApplyResult.terminate() may be set to signify that the job must not be executed. It can be used in combination with Pool.terminate_job. - Pipe/_SimpleQueue: Now supports rnonblock/wnonblock arguments to set the read or write end of the pipe to be nonblocking. - Pool: Log message included exception info but exception happened in another process so the resulting traceback was wrong. - Pool: Worker process can now prepare results before they are sent back to the main process (using ``Worker.prepare_result``). 3.3.0.1 - 2013-11-04 -------------------- - Pool: New ``correlation_id`` argument to ``apply_async`` can be used to set a related id for the ``ApplyResult`` object returned: >>> r = pool.apply_async(target, args, kwargs, correlation_id='foo') >>> r.correlation_id 'foo' - Pool: New callback `on_process_exit` is called when a pool process exits, with signature ``(pid, exitcode)``. Contributed by Daniel M. Taub. - Pool: Improved the too many restarts detection. 3.3.0.0 - 2013-10-14 -------------------- - Dual code base now runs on Python 2.6+ and Python 3. - No longer compatible with Python 2.5 - Includes many changes from multiprocessing in 3.4. - Now uses ``time.monotonic`` when available, also including fallback implementations for Linux and OS X. - No longer cleans up after receiving SIGILL, SIGSEGV or SIGFPE Contributed by Kevin Blackham - ``Finalize`` and ``register_after_fork`` is now aliases to multiprocessing. It's better to import these from multiprocessing directly now so that there aren't multiple registries. - New `billiard.queues._SimpleQueue` that does not use semaphores. - Pool: Can now be extended to support using multiple IPC queues. - Pool: Can now use async I/O to write to pool IPC queues. - Pool: New ``Worker.on_loop_stop`` handler can be used to add actions at pool worker process shutdown. Note that, like all finalization handlers, there is no guarantee that this will be executed. Contributed by dmtaub. 2.7.3.30 - 2013-06-28 --------------------- - Fixed ImportError in billiard._ext 2.7.3.29 - 2013-06-28 --------------------- - Compilation: Fixed improper handling of HAVE_SEM_OPEN (Issue #55) Fix contributed by Krzysztof Jagiello. - Process now releases logging locks after fork. This previously happened in Pool, but it was done too late as processes logs when they bootstrap. - Pool.terminate_job now ignores `No such process` errors. - billiard.Pool entrypoint did not support new arguments to billiard.pool.Pool - Connection inbound buffer size increased from 1kb to 128kb. - C extension cleaned up by properly adding a namespace to symbols. - _exit_function now works even if thread wakes up after gc collect. 2.7.3.28 - 2013-04-16 --------------------- - Pool: Fixed regression that disabled the deadlock fix in 2.7.3.24 - Pool: RestartFreqExceeded could be raised prematurely. - Process: Include pid in startup and process INFO logs. 2.7.3.27 - 2013-04-12 --------------------- - Manager now works again. - Python 3 fixes for billiard.connection. - Fixed invalid argument bug when running on Python 3.3 Fix contributed by Nathan Wan. - Ignore OSError when setting up signal handlers. 2.7.3.26 - 2013-04-09 --------------------- - Pool: Child processes must ignore SIGINT. 2.7.3.25 - 2013-04-09 --------------------- - Pool: 2.7.3.24 broke support for subprocesses (Issue #48). Signals that should be ignored were instead handled by terminating. 2.7.3.24 - 2013-04-08 --------------------- - Pool: Make sure finally blocks are called when process exits due to a signal. This fixes a deadlock problem when the process is killed while having acquired the shared semaphore. However, this solution does not protect against the processes being killed, a more elaborate solution is required for that. Hopefully this will be fixed soon in a later version. - Pool: Can now use GDB to debug pool child processes. - Fixes Python 3 compatibility problems. Contributed by Albertas Agejevas. 2.7.3.23 - 2013-03-22 --------------------- - Windows: Now catches SystemExit from setuptools while trying to build the C extension (Issue #41). 2.7.3.22 - 2013-03-08 --------------------- - Pool: apply_async now supports a ``callbacks_propagate`` keyword argument that can be a tuple of exceptions to propagate in callbacks. (callback, errback, accept_callback, timeout_callback). - Errors are no longer logged for OK and recycle exit codes. This would cause normal maxtasksperchild recycled process to log an error. - Fixed Python 2.5 compatibility problem (Issue #33). - FreeBSD: Compilation now disables semaphores if Python was built without it (Issue #40). Contributed by William Grzybowski 2.7.3.21 - 2013-02-11 --------------------- - Fixed typo EX_REUSE -> EX_RECYCLE - Code now conforms to new pep8.py rules. 2.7.3.20 - 2013-02-08 --------------------- - Pool: Disable restart limit if maxR is not set. - Pool: Now uses os.kill instead of signal.signal. Contributed by Lukasz Langa - Fixed name error in process.py - Pool: ApplyResult.get now properly raises exceptions. Fix contributed by xentac. 2.7.3.19 - 2012-11-30 --------------------- - Fixes problem at shutdown when gc has collected symbols. - Pool now always uses _kill for Py2.5 compatibility on Windows (Issue #32). - Fixes Python 3 compatibility issues 2.7.3.18 - 2012-11-05 --------------------- - [Pool] Fix for check_timeouts if not set. Fix contributed by Dmitry Sukhov - Fixed pickle problem with Traceback. Code.frame.__loader__ is now ignored as it may be set to an unpickleable object. - The Django old-layout warning was always showing. 2.7.3.17 - 2012-09-26 --------------------- - Fixes typo 2.7.3.16 - 2012-09-26 --------------------- - Windows: Fixes for SemLock._rebuild (Issue #24). - Pool: Job terminated with terminate_job now raises billiard.exceptions.Terminated. 2.7.3.15 - 2012-09-21 --------------------- - Windows: Fixes unpickling of SemLock when using fallback. - Windows: Fixes installation when no C compiler. 2.7.3.14 - 2012-09-20 --------------------- - Installation now works again for Python 3. 2.7.3.13 - 2012-09-14 --------------------- - Merged with Python trunk (many authors, many fixes: see Python changelog in trunk). - Using execv now also works with older Django projects using setup_environ (Issue #10). - Billiard now installs with a warning that the C extension could not be built if a compiler is not installed or the build fails in some other way. It really is recommended to have the C extension installed when running with force execv, but this change also makes it easier to install. - Pool: Hard timeouts now sends KILL shortly after TERM so that C extensions cannot block the signal. Python signal handlers are called in the interpreter, so they cannot be called while a C extension is blocking the interpreter from running. - Now uses a timeout value for Thread.join that doesn't exceed the maximum on some platforms. - Fixed bug in the SemLock fallback used when C extensions not installed. Fix contributed by Mher Movsisyan. - Pool: Now sets a Process.index attribute for every process in the pool. This number will always be between 0 and concurrency-1, and can be used to e.g. create a logfile for each process in the pool without creating a new logfile whenever a process is replaced. 2.7.3.12 - 2012-08-05 --------------------- - Fixed Python 2.5 compatibility issue. - New Pool.terminate_job(pid) to terminate a job without raising WorkerLostError 2.7.3.11 - 2012-08-01 --------------------- - Adds support for FreeBSD 7+ Fix contributed by koobs. - Pool: New argument ``allow_restart`` is now required to enable the pool process sentinel that is required to restart the pool. It's disabled by default, which reduces the number of file descriptors/semaphores required to run the pool. - Pool: Now emits a warning if a worker process exited with error-code. But not if the error code is 155, which is now returned if the worker process was recycled (maxtasksperchild). - Python 3 compatibility fixes. - Python 2.5 compatibility fixes. 2.7.3.10 - 2012-06-26 --------------------- - The ``TimeLimitExceeded`` exception string representation only included the seconds as a number, it now gives a more human friendly description. - Fixed typo in ``LaxBoundedSemaphore.shrink``. - Pool: ``ResultHandler.handle_event`` no longer requires any arguments. - setup.py bdist now works 2.7.3.9 - 2012-06-03 -------------------- - Environment variable ``MP_MAIN_FILE`` envvar is now set to the path of the ``__main__`` module when execv is enabled. - Pool: Errors occurring in the TaskHandler are now reported. 2.7.3.8 - 2012-06-01 -------------------- - Can now be installed on Py 3.2 - Issue #12091: simplify ApplyResult and MapResult with threading.Event Patch by Charles-Francois Natali - Pool: Support running without TimeoutHandler thread. - The with_*_thread arguments has also been replaced with a single `threads=True` argument. - Two new pool callbacks: - ``on_timeout_set(job, soft, hard)`` Applied when a task is executed with a timeout. - ``on_timeout_cancel(job)`` Applied when a timeout is cancelled (the job completed) 2.7.3.7 - 2012-05-21 -------------------- - Fixes Python 2.5 support. 2.7.3.6 - 2012-05-21 -------------------- - Pool: Can now be used in an event loop, without starting the supporting threads (TimeoutHandler still not supported) To facilitate this the pool has gained the following keyword arguments: - ``with_task_thread`` - ``with_result_thread`` - ``with_supervisor_thread`` - ``on_process_up`` Callback called with Process instance as argument whenever a new worker process is added. Used to add new process fds to the eventloop:: def on_process_up(proc): hub.add_reader(proc.sentinel, pool.maintain_pool) - ``on_process_down`` Callback called with Process instance as argument whenever a new worker process is found dead. Used to remove process fds from the eventloop:: def on_process_down(proc): hub.remove(proc.sentinel) - ``semaphore`` Sets the semaphore used to protect from adding new items to the pool when no processes available. The default is a threaded one, so this can be used to change to an async semaphore. And the following attributes:: - ``readers`` A map of ``fd`` -> ``callback``, to be registered in an eventloop. Currently this is only the result outqueue with a callback that processes all currently incoming results. And the following methods:: - ``did_start_ok`` To be called after starting the pool, and after setting up the eventloop with the pool fds, to ensure that the worker processes didn't immediately exit caused by an error (internal/memory). - ``maintain_pool`` Public version of ``_maintain_pool`` that handles max restarts. - Pool: Process too frequent restart protection now only counts if the process had a non-successful exit-code. This to take into account the maxtasksperchild option, and allowing processes to exit cleanly on their own. - Pool: New options max_restart + max_restart_freq This means that the supervisor can't restart processes faster than max_restart' times per max_restart_freq seconds (like the Erlang supervisor maxR & maxT settings). The pool is closed and joined if the max restart frequency is exceeded, where previously it would keep restarting at an unlimited rate, possibly crashing the system. The current default value is to stop if it exceeds 100 * process_count restarts in 1 seconds. This may change later. It will only count processes with an unsuccessful exit code, this is to take into account the ``maxtasksperchild`` setting and code that voluntarily exits. - Pool: The ``WorkerLostError`` message now includes the exit-code of the process that disappeared. 2.7.3.5 - 2012-05-09 -------------------- - Now always cleans up after ``sys.exc_info()`` to avoid cyclic references. - ExceptionInfo without arguments now defaults to ``sys.exc_info``. - Forking can now be disabled using the ``MULTIPROCESSING_FORKING_DISABLE`` environment variable. Also this envvar is set so that the behavior is inherited after execv. - The semaphore cleanup process started when execv is used now sets a useful process name if the ``setproctitle`` module is installed. - Sets the ``FORKED_BY_MULTIPROCESSING`` environment variable if forking is disabled. 2.7.3.4 - 2012-04-27 -------------------- - Added `billiard.ensure_multiprocessing()` Raises NotImplementedError if the platform does not support multiprocessing (e.g. Jython). 2.7.3.3 - 2012-04-23 -------------------- - PyPy now falls back to using its internal _multiprocessing module, so everything works except for forking_enable(False) (which silently degrades). - Fixed Python 2.5 compat. issues. - Uses more with statements - Merged some of the changes from the Python 3 branch. 2.7.3.2 - 2012-04-20 -------------------- - Now installs on PyPy/Jython (but does not work). 2.7.3.1 - 2012-04-20 -------------------- - Python 2.5 support added. 2.7.3.0 - 2012-04-20 -------------------- - Updated from Python 2.7.3 - Python 2.4 support removed, now only supports 2.5, 2.6 and 2.7. (may consider py3k support at some point). - Pool improvements from Celery. - no-execv patch added (http://bugs.python.org/issue8713) Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python Classifier: Programming Language :: C Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.5 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: Jython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: License :: OSI Approved :: BSD License Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: System :: Distributed Computing billiard-3.3.0.15/README.rst0000644000076500000000000000243412276217473015642 0ustar asksolwheel00000000000000======== billiard ======== :version: 3.3.0.15 About ----- `billiard` is a fork of the Python 2.7 `multiprocessing `_ package. The multiprocessing package itself is a renamed and updated version of R Oudkerk's `pyprocessing `_ package. This standalone variant is intended to be compatible with Python 2.4 and 2.5, and will draw it's fixes/improvements from python-trunk. - This package would not be possible if not for the contributions of not only the current maintainers but all of the contributors to the original pyprocessing package listed `here `_ - Also it is a fork of the multiprocessin backport package by Christian Heims. - It includes the no-execv patch contributed by R. Oudkerk. - And the Pool improvements previously located in `Celery`_. .. _`Celery`: http://celeryproject.org Bug reporting ------------- Please report bugs related to multiprocessing at the `Python bug tracker `_. Issues related to billiard should be reported at http://github.com/celery/billiard/issues. .. image:: https://d2weczhvl823v0.cloudfront.net/celery/billiard/trend.png :alt: Bitdeli badge :target: https://bitdeli.com/free billiard-3.3.0.15/requirements/0000755000076500000000000000000012276217622016667 5ustar asksolwheel00000000000000billiard-3.3.0.15/requirements/test-ci.txt0000644000076500000000000000006112270022117020760 0ustar asksolwheel00000000000000coverage>=3.0 redis pymongo SQLAlchemy PyOpenSSL billiard-3.3.0.15/requirements/test.txt0000644000076500000000000000004712270022117020373 0ustar asksolwheel00000000000000unittest2>=0.4.0 nose nose-cover3 mock billiard-3.3.0.15/requirements/test3.txt0000644000076500000000000000002612270022117020453 0ustar asksolwheel00000000000000nose nose-cover3 mock billiard-3.3.0.15/setup.cfg0000644000076500000000000000027312276217622015767 0ustar asksolwheel00000000000000[nosetests] where = billiard/tests cover3-branch = 1 cover3-html = 1 cover3-package = billiard cover3-exclude = billiard.tests [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 billiard-3.3.0.15/setup.py0000644000076500000000000002002112271741474015653 0ustar asksolwheel00000000000000from __future__ import print_function import os import sys import glob try: from setuptools import setup, Extension, find_packages except ImportError: from distutils.core import setup, Extension, find_packages # noqa from distutils import sysconfig from distutils.errors import ( CCompilerError, DistutilsExecError, DistutilsPlatformError ) HERE = os.path.dirname(os.path.abspath(__file__)) ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) if sys.platform == 'win32' and sys.version_info >= (2, 6): # distutils.msvc9compiler can raise IOError if the compiler is missing ext_errors += (IOError, ) is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') is_py3k = sys.version_info[0] == 3 BUILD_WARNING = """ ----------------------------------------------------------------------- WARNING: The C extensions could not be compiled ----------------------------------------------------------------------- Maybe you do not have a C compiler installed on this system? The reason was: %s This is just a warning as most of the functionality will work even without the updated C extension. It will simply fallback to the built-in _multiprocessing module. Most notably you will not be able to use FORCE_EXECV on POSIX systems. If this is a problem for you then please install a C compiler or fix the error(s) above. ----------------------------------------------------------------------- """ # -*- py3k -*- extras = {} # -*- Distribution Meta -*- import re re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)') re_vers = re.compile(r'VERSION\s*=\s*\((.*?)\)') re_doc = re.compile(r'^"""(.+?)"""') rq = lambda s: s.strip("\"'") def add_default(m): attr_name, attr_value = m.groups() return ((attr_name, rq(attr_value)), ) def add_version(m): v = list(map(rq, m.groups()[0].split(', '))) return (('VERSION', '.'.join(v[0:4]) + ''.join(v[4:])), ) def add_doc(m): return (('doc', m.groups()[0]), ) pats = {re_meta: add_default, re_vers: add_version, re_doc: add_doc} here = os.path.abspath(os.path.dirname(__file__)) meta_fh = open(os.path.join(here, 'billiard/__init__.py')) try: meta = {} for line in meta_fh: if line.strip() == '# -eof meta-': break for pattern, handler in pats.items(): m = pattern.match(line.strip()) if m: meta.update(handler(m)) finally: meta_fh.close() if sys.version_info < (2, 5): raise ValueError('Versions of Python before 2.5 are not supported') if sys.platform == 'win32': # Windows macros = dict() libraries = ['ws2_32'] elif sys.platform.startswith('darwin'): # Mac OSX macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=0, HAVE_FD_TRANSFER=1, HAVE_BROKEN_SEM_GETVALUE=1 ) libraries = [] elif sys.platform.startswith('cygwin'): # Cygwin macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=0, HAVE_BROKEN_SEM_UNLINK=1 ) libraries = [] elif sys.platform in ('freebsd4', 'freebsd5', 'freebsd6'): # FreeBSD's P1003.1b semaphore support is very experimental # and has many known problems. (as of June 2008) macros = dict( # FreeBSD 4-6 HAVE_SEM_OPEN=0, HAVE_SEM_TIMEDWAIT=0, HAVE_FD_TRANSFER=1, ) libraries = [] elif sys.platform in ('freebsd7', 'freebsd8', 'freebsd9', 'freebsd10'): macros = dict( # FreeBSD 7+ HAVE_SEM_OPEN=bool( sysconfig.get_config_var('HAVE_SEM_OPEN') and not bool(sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')) ), HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=1, ) libraries = [] elif sys.platform.startswith('openbsd'): macros = dict( # OpenBSD HAVE_SEM_OPEN=0, # Not implemented HAVE_SEM_TIMEDWAIT=0, HAVE_FD_TRANSFER=1, ) libraries = [] else: # Linux and other unices macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=1, ) libraries = ['rt'] if sys.platform == 'win32': multiprocessing_srcs = [ 'Modules/_billiard/multiprocessing.c', 'Modules/_billiard/semaphore.c', 'Modules/_billiard/pipe_connection.c', 'Modules/_billiard/socket_connection.c', 'Modules/_billiard/win32_functions.c', ] else: multiprocessing_srcs = [ 'Modules/_billiard/multiprocessing.c', 'Modules/_billiard/socket_connection.c', ] if macros.get('HAVE_SEM_OPEN', False): multiprocessing_srcs.append('Modules/_billiard/semaphore.c') long_description = open(os.path.join(HERE, 'README.rst')).read() long_description += """ =========== Changes =========== """ long_description += open(os.path.join(HERE, 'CHANGES.txt')).read() if not is_py3k: long_description = long_description.encode('ascii', 'replace') # -*- Installation Requires -*- py_version = sys.version_info is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') def strip_comments(l): return l.split('#', 1)[0].strip() def reqs(f): return list(filter(None, [strip_comments(l) for l in open( os.path.join(os.getcwd(), 'requirements', f)).readlines()])) if py_version[0] == 3: tests_require = reqs('test3.txt') else: tests_require = reqs('test.txt') def _is_build_command(argv=sys.argv, cmds=('install', 'build', 'bdist')): for arg in argv: if arg.startswith(cmds): return arg def run_setup(with_extensions=True): extensions = [] if with_extensions: extensions = [ Extension( '_billiard', sources=multiprocessing_srcs, define_macros=macros.items(), libraries=libraries, include_dirs=['Modules/_billiard'], depends=glob.glob('Modules/_billiard/*.h') + ['setup.py'], ), ] exclude = 'billiard.py2' if is_py3k else 'billiard.py3' packages = find_packages(exclude=[ 'ez_setup', 'tests', 'funtests.*', 'tests.*', exclude, ]) setup( name='billiard', version=meta['VERSION'], description=meta['doc'], long_description=long_description, packages=packages, ext_modules=extensions, author=meta['author'], author_email=meta['author_email'], maintainer=meta['maintainer'], maintainer_email=meta['contact'], url=meta['homepage'], zip_safe=False, license='BSD', tests_require=tests_require, test_suite='nose.collector', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Programming Language :: Python', 'Programming Language :: C', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: Jython', 'Programming Language :: Python :: Implementation :: PyPy', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'License :: OSI Approved :: BSD License', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: System :: Distributed Computing', ], **extras ) try: run_setup(not (is_jython or is_pypy or is_py3k)) except BaseException: if _is_build_command(sys.argv): import traceback print(BUILD_WARNING % '\n'.join(traceback.format_stack()), file=sys.stderr) run_setup(False) else: raise